pax_global_header00006660000000000000000000000064131403744660014522gustar00rootroot0000000000000052 comment=19e74e0a677122c2ec1385809ab8ac8b5a27e74d UVP-Tools-2.2.0.316/000077500000000000000000000000001314037446600136035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/.gitignore000066400000000000000000000003661314037446600156000ustar00rootroot00000000000000# Object files *.o *.ko *.obj *.elf # Precompiled Headers *.gch *.pch # Libraries *.lib *.a *.la *.lo # Shared objects (inc. Windows DLLs) *.dll *.so *.so.* *.dylib # Executables *.exe *.out *.app *.i*86 *.x86_64 *.hex # Debug files *.dSYM/ UVP-Tools-2.2.0.316/LICENSE000066400000000000000000000431761314037446600146230ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. 
GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. {description} Copyright (C) {year} {fullname} This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
  You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  {signature of Ty Coon}, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

UVP-Tools-2.2.0.316/Makefile

$(shell dos2unix build_tools)
$(shell chmod u+x build_tools)

all:
	@./build_tools -b

clean:
	@./build_tools -c

UVP-Tools-2.2.0.316/README.md

UVP Tools 2.2.0.xxx

The following is the readme file of UVP Tools 2.2.0. From the readme file, you will learn what a UVP Tools project is, how the source code of UVP Tools is structured, and how UVP Tools is installed.

What Is a UVP Tools Project?

UVP Tools is a tool that integrates the Xen front-end driver and uvp-monitor (virtual machine monitoring program). It is designed for use on virtual machines (VMs) equipped with a 32-bit x86-based CPU (386 or higher) or an x86_64 CPU. The Xen front-end driver improves the I/O processing efficiency of VMs. The uvp-monitor helps administrators gain visibility into VM running status.

Components of the Xen front-end driver:
- xen-platform-pci driver: Xen front-end PCI driver, which provides xenbus access, establishes event channels, and grants references.
- xen-balloon driver: Xen front-end balloon driver, which adjusts VM memory through the host.
- xen-vbd/xen-blkfront driver: Xen front-end disk driver.
- xen-vnif/xen-netfront driver: Xen front-end NIC driver.
- xen-scsi/xen-scsifront driver: Xen front-end PV SCSI driver.
- xen-procfs driver: provides the xenbus driver and adapts to VMs that use a pvops kernel but do not provide the /proc/xen/xenbus or /dev/xen/xenbus interfaces.
- vni driver: virtio driver.

Features of uvp-monitor:
- Collects information about resource usage of every VM and periodically writes the information into domain0 through xenstore. The resource usage information includes the total number of CPUs, CPU usage, memory capacity, memory usage, disk capacity, disk usage, and the number of packets received or sent by a NIC.
- Collects VM hostnames and writes them into domain0 through xenstore.
- Notifies domain0 whether the current VM supports hot-plug of memory, CPUs, and disks.
- Facilitates live migration of Xen VMs and triggers VMs to send gARP and NDP packets to the network at completion of live migration.
- Facilitates self-upgrade of UVP Tools on VMs. The uvp-monitor works with xenstore to notify domain0 of the UVP Tools version installed on VMs. If the UVP Tools version on a VM is different from the UVP Tools version on the host, the UVP Tools on the VM is automatically upgraded to the version installed on the host.

Structure of UVP Tools source code:
UVP-Tools-2.2.0.xxx
|-- bin/              # Directory that stores tools required for installing and using UVP Tools, such as the tool used for acquiring Linux distribution information and the disk hot-plug tool.
|-- build_tools       # Script used for building the UVP Tools package.
|-- config/           # Directory that stores UDEV rules used by UVP Tools.
|-- cpfile.sh         # Tool for copying the UVP Tools components compatible with the current Linux distribution during self-upgrade of UVP Tools.
|-- install           # Script for installing, uninstalling, and upgrading the UVP Tools package.
|-- Makefile          # File that defines rules for building the UVP Tools package.
|-- README            # Readme file of UVP Tools.
|-- upg.sh            # Script for self-upgrading UVP Tools on a VM to the UVP Tools version installed on the host.
|-- uvp-monitor/      # Source code of uvp-monitor.
|-- uvp-xenpv/        # Source code of the Xen front-end driver.
|   |-- uvp-classic_xen_driver-2.6.32to3.0.x/    # Source code of the classic Xen front-end driver, which works well with SLES 11 SP.
|   |-- uvp-classic_xen_driver-3.12.xto3.16.x/   # Source code of the classic Xen front-end driver, which works well with SLES 12 SP and openSUSE 13.2.
|   |-- uvp-pvops_xen_driver-2.6.32to4.0.x/      # Source code of the pvops front-end driver, which works well with CentOS/RHEL 6.x, Debian 8, and Fedora 22.
|   |-- others/       # Source code of the classic Xen front-end driver, which works well with CentOS/RHEL 4.x/5.x, Fedora 9/12, SLES 10 SP, and Ubuntu 8/12.
|-- version.ini       # Version information of UVP Tools source code.

Installing UVP Tools

- Obtain the UVP Tools source code package. Save it to a directory on the Linux VM where UVP Tools will be installed, and unpack it. Be sure that you have permission to access this directory.
  - If the downloaded UVP Tools source code package is an official release, run the following command:
      tar -xzf UVP-Tools-2.2.0.xxx.tar.gz
    or
      gunzip UVP-Tools-2.2.0.xxx.zip
  - If the downloaded UVP Tools source code package is a source code package of the master branch, run the following command:
      gunzip UVP-Tools-master.zip
  The Linux VM where UVP Tools will be installed must come with gcc, make, libc, and kernel-devel. For simplicity, the Linux VM where UVP Tools will be installed is referred to as the Linux VM.

- Go to the uvp-monitor directory. Run the following command to build uvp-monitor:
      make
  Note:
  (1) When uvp-monitor is being built, the make tool automatically downloads the xen-4.1.2.tar.gz source code package from the Internet. If the download fails, uvp-monitor cannot be successfully built. If the Linux VM cannot access the Internet, manually obtain the xen-4.1.2.tar.gz source code package and save it to the uvp-monitor directory.
  (2) A uvp-monitor program that statically links libxenstore is provided along with UVP Tools. Using such a uvp-monitor program avoids uvp-monitor failures caused by defects in the default libxenstore.so shared library of the Linux VM. For example, the default libxenstore.so file of a CoreOS 1010.5.0-based VM is defective and its /usr directory cannot be modified. To address this problem, install the uvp-monitor program that statically links libxenstore.

  To install the uvp-monitor that dynamically links libxenstore, run the following command:
      make install
  To install the uvp-monitor that statically links libxenstore, run the following command:
      make install_static
  Note: If you need to build uvp-monitor in a cross-compilation environment, put the uvp-monitor source code in the cross-compilation environment, build it there, and then install the built uvp-monitor on the target VM.
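  For reference, the uvp-monitor steps above can be run as one sequence. This is a minimal sketch, not part of the shipped scripts; the unpack path ~/UVP-Tools-2.2.0.xxx is an assumption, and it presumes xen-4.1.2.tar.gz was copied into uvp-monitor/ beforehand if the VM has no Internet access:

      #!/bin/bash
      # Sketch: build and install uvp-monitor from the unpacked source tree.
      set -e
      cd ~/UVP-Tools-2.2.0.xxx/uvp-monitor   # assumed unpack location

      # Build uvp-monitor (fetches xen-4.1.2.tar.gz unless it is already present).
      make

      # Install the dynamically linked uvp-monitor ...
      make install
      # ... or, on VMs with a defective libxenstore.so (e.g. CoreOS 1010.5.0),
      # install the statically linked variant instead:
      # make install_static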
- Build the Xen front-end driver. Take SLES 11 SP3 x86_64 as an example.
  - Go to the uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x directory.
  - Run the following command to build the Xen front-end driver:
      make KERNDIR="/lib/modules/$(uname -r)/build" BUILDKERNEL=$(uname -r)
    After the Xen front-end driver is successfully built, run the following command to install it:
      make KERNDIR="/lib/modules/$(uname -r)/build" BUILDKERNEL=$(uname -r) modules_install
    (A consolidated sketch of these build and uninstall steps appears at the end of this readme.)
  Note: This operation may replace the Xen front-end driver provided by the OS vendor with the one provided by UVP Tools. If the Xen front-end driver used by the Linux VM is not provided by UVP Tools, uninstall it first. Take SLES 11 SP3 x86_64 as an example.
  - Run the following command to check whether the Linux VM is equipped with the Xen front-end driver provided by the OS vendor:
      rpm -qa | grep xen-kmp
  - If a command output is returned, the Linux VM is equipped with the Xen front-end driver provided by the OS vendor. Then, run the following command to uninstall it:
      rpm -e xen-kmp-default-4.2.2_04_3.0.76_0.11-0.7.5
    Note: "xen-kmp-default-4.2.2_04_3.0.76_0.11-0.7.5" is merely an example. Replace it with the Xen front-end driver package installed on the Linux VM.

Alternatively, you can use the build_tools script to build a UVP Tools installation package for the Linux VM. Take SLES 11 SP3 x86_64 as an example.
- After the UVP Tools source code package is unpacked, run the following command in the UVP-Tools-2.2.0.xxx directory to build the UVP Tools installation package:
      make all
  A directory named uvp-tools-linux-2.2.0.xxx is generated in the current directory after the make all command is run. Go to the uvp-tools-linux-2.2.0.xxx directory and run the following command to execute the UVP Tools installation script install:
      ./install
  Note: If you want to know what the install script does, run the sh -x install command to print script execution information, or read through the script.

What can you do if a problem is encountered?
1. Briefly describe your problem. A one-line summary is recommended.
2. Describe your problem in detail, including environments, log information, and automatic handling of the problem.
3. Send the mail to uvptools@huawei.com for further assistance.
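  The driver build and vendor-driver removal steps described above can also be combined into one script. This is a minimal sketch under the same SLES 11 SP3 x86_64 example, not one of the shipped scripts; the unpack path ~/UVP-Tools-2.2.0.xxx is an assumption, and the driver directory must be adjusted for other distributions:

      #!/bin/bash
      # Sketch: remove the OS vendor's xen-kmp driver (if any), then build and
      # install the classic Xen front-end driver shipped with UVP Tools.
      set -e
      cd ~/UVP-Tools-2.2.0.xxx/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x   # assumed path

      # Uninstall the vendor front-end driver if one is installed.
      vendor_kmp=$(rpm -qa | grep xen-kmp || true)
      if [ -n "$vendor_kmp" ]; then
          rpm -e $vendor_kmp
      fi

      # Build against the running kernel, then install the modules.
      make KERNDIR="/lib/modules/$(uname -r)/build" BUILDKERNEL=$(uname -r)
      make KERNDIR="/lib/modules/$(uname -r)/build" BUILDKERNEL=$(uname -r) modules_install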
UVP-Tools-2.2.0.316/bin/000077500000000000000000000000001314037446600143535ustar00rootroot00000000000000UVP-Tools-2.2.0.316/bin/CheckKernelUpdate.sh000066400000000000000000000230231314037446600202300ustar00rootroot00000000000000#!/bin/bash ############################################################################### ### error definition ############################################################################### ERR_OK=0 ERR_NG=1 ERR_FILE_NOEXIST=20 Info='eval 2>&1 logger "[CheckKernelUpdate:$FUNCNAME:$LINENO]"' grub_config() { ### grub config if [ -f '/boot/grub/grub.conf' ] then MENULIST=$(readlink -f "/boot/grub/grub.conf") elif [ -f '/boot/grub/menu.lst' ] then MENULIST=$(readlink -f '/boot/grub/menu.lst') elif [ -f '/boot/grub/grub.cfg' ] then MENULIST=$(readlink -f '/boot/grub/grub.cfg') SYS="debian" elif [ -f '/boot/burg/burg.cfg' ] then MENULIST=$(readlink -f '/boot/burg/burg.cfg') SYS="debian" elif [ -f '/boot/grub2/grub.cfg' ] then MENULIST=$(readlink -f '/boot/grub2/grub.cfg') GRUBENV=$(readlink -f '/boot/grub2/grubenv') SYS="grub2" elif [ -f '/boot/efi/EFI/redhat/grub.conf' ] then MENULIST=$(readlink -f '/boot/efi/EFI/redhat/grub.conf') elif [ -f '/etc/elilo.conf' ] then SYS="elilo" MENULIST=$(readlink -f '/etc/elilo.conf') fi } info_boot_entry() { local menu_lst=$1 entry_num=$2 local var_list="menu_default menu_start begin_line end_line title_line initrd_line" local comment="init" ### set default parameter if [ ! -f "$menu_lst" ] then $Info "can not find $menu_lst." return $ERR_FILE_NOEXIST fi menu_default=$(sed -n '/\b\s*default\s*=\{0,1\}"\{0,1\}\s*[0-9]"\{0,1\}\+\s*$/=' $menu_lst | sed -n '$p') if [ -z "$entry_num" ] then if [ -n "$menu_default" ] then if [ -z "$SYS" ] then entry_num=$(($(sed -n "${menu_default}p" $menu_lst | sed "s/^\s*default\s*=\{0,1\}\s*\([0-9]\+\)\s*$/\1/g")+1)) else entry_num=$(($(sed -n "${menu_default}p" $menu_lst | sed "s/\b\s*set\s*default\s*=\{0,1\}\s*\"\{0,1\}\([0-9]\+\)\"\{0,1\}\s*$/\1/g")+1)) fi else $Info "can not find default boot entry in $menu_lst." return $ERR_NG fi $Info "entry_num is $entry_num." elif [ $entry_num -le 0 ] then return 1 fi ### initialize all variables for var in $var_list do unset $var done if [ -z "$SYS" ] then menu_start=$(sed -n '/^\s*title\s\+/=' "${menu_lst}" | sed -n '1p') else menu_start=$(sed -n '/^\s*menuentry\s\+/=' "${menu_lst}" | sed -n '1p') fi if [ -z "$menu_start" ] then menu_start=$(sed -n '$=' $menu_lst) return 0 fi ## process grub entry top # get [title_line] of grub entry if [ -z "$SYS" ] then title_line=$(grep -n "^[[:space:]]*title[[:space:]]\+" $menu_lst | sed -n "${entry_num}p" | awk -F ":" '{print $1}') else title_line=$(grep -n "^[[:space:]]*menuentry[[:space:]]\+" $menu_lst | sed -n "${entry_num}p" | awk -F ":" '{print $1}') fi if [ -z "$title_line" ] then $Info "can not find boot entry ${entry_num} in $menu_lst." 
return $ERR_NG fi # get [begin_line] of grub entry comment=$(sed -n "$(($title_line-1))p" $menu_lst | grep "^[[:space:]]*###.*YaST.*identifier:.*linux[[:space:]]*###[[:space:]]*$") if [ -n "$comment" ] # include grub comment above then begin_line=$((title_line-1)) else begin_line=$title_line fi ## process grub entry bottom # get [end_line] of grub entry if [ -z "$SYS" ] then end_line=$(grep -n "^[[:space:]]*title[[:space:]]\+" $menu_lst | sed -n "$(($entry_num+1))p" | awk -F ":" '{print $1}') else end_line=$(grep -n "^[[:space:]]*menuentry[[:space:]]\+" $menu_lst | sed -n "$(($entry_num+1))p" | awk -F ":" '{print $1}') fi [ -z "$end_line" ] && end_line=$(sed -n '$=' "$menu_lst") while [ -n "$(sed -n ${end_line}s/\(^\s*#.*\)/\1/p $menu_lst)" ] # omit grub comment below do end_line=$((end_line-1)) done $Info "begin_line is $begin_line end_line is $end_line." ## get other line of grub entry for ((line_num=$begin_line; line_num<=$end_line; line_num++)) do line=$(sed -n "${line_num}p" $menu_lst) if [ -n "$(echo -n "$line" | grep "^[[:space:]]*initrd[[:space:]]\+")" ] ; then initrd_line=$line_num ; fi done ### return susess return $ERR_OK } info_boot_entry_uefi() { local menu_lst=$1 entry_num=$2 local comment="init" local var_list="menu_default menu_start begin_line end_line root_line kernel_line initrd_line" ### set default parameter if [ ! -f "$menu_lst" ] then $Info "can not find $menu_lst." return $ERR_FILE_NOEXIST fi menu_default=$(sed -n '/^\s*default\s*=\{0,1\}\s*\+\S\+\s*$/=' $menu_lst | sed -n '$p') if [ -n "$menu_default" ] then entry_num=$(sed -n "${menu_default}p" $menu_lst | sed "s/^\s*default\s*=\{0,1\}\s*\(\S\+\)\s*$/\1/g") else $Info "can not find default boot entry in $menu_lst." return $ERR_NG fi ### initialize all variables for var in $var_list do unset $var done menu_start=$(sed -n '/^\s*label\s\+/=' "${menu_lst}" | sed -n '1p') if [ -z "$menu_start" ] then menu_start=$(sed -n '$=' $menu_lst) return 0 fi label_line=$(grep -n "^[[:space:]]*label[[:space:]]\+" $menu_lst | awk -F ":" '{print $1}') for line in $label_line do if [ -n "$begin_line" ] then end_line=$line break fi lable_val=$(sed -n "${line}p" $menu_lst | sed "s/^\s*label\s*=\{0,1\}\s*\(\S\+\)\s*$/\1/g") if [ "$lable_val" = "$entry_num" ] then begin_line=$line fi done while [ -z "$(sed -n ${begin_line}s/\^[[:space:]]*$/\1/p $menu_lst)" ] do begin_line=$((begin_line-1)) done if [ -z "$end_line" ] then end_line=$(sed -n '$=' "$menu_lst") else while [ -z "$(sed -n ${end_line}s/\^[[:space:]]*$/\1/p $menu_lst)" ] do end_line=$((end_line-1)) done fi ## get other line of grub entry for ((line_num=$begin_line; line_num<=$end_line; line_num++)) do line=$(sed -n "${line_num}p" $menu_lst) if [ -n "$(echo -n "$line" | grep "^[[:space:]]*initrd[[:space:]]\+")" ] ; then initrd_line=$line_num ; fi done return 0 } info_boot_entry_grub2() { local menu_lst=$1 local grubenv=$2 local var_list="default_entry end_line label_line lable_val tmpline initrd_line" if [ ! -f "$menu_lst" ] then $Info "can not find $menu_lst." return $ERR_FILE_NOEXIST fi if [ ! -f "$grubenv" ] then $Info "can not find $grubenv." return $ERR_FILE_NOEXIST fi ### initialize all variables for var in $var_list do unset $var done ### set default parameter default_entry=$(sed -n '/saved_entry/p' $grubenv | sed -n 's/saved_entry=//gp') if [ -z "$default_entry" ] then $Info "can not find default boot entry in $grubenv." 
return $ERR_NG fi end_line=$(sed -n '$=' "$menu_lst") label_line=$(grep -n "^[[:space:]]*menuentry[[:space:]]\+" $menu_lst | awk -F ":" '{print $1}') for line in $label_line do lable_val=$(sed -n "${line}p" $menu_lst | sed -n '/^menuentry.*$/p' | sed -n "s/'/\n/gp" | sed -n 2p) if [ "${default_entry}" = "${lable_val}" ] then for ((line_num=$line; line_num<=$end_line; line_num++)) do tmpline=$(sed -n "${line_num}p" $menu_lst) if [ -n "$(echo -n "$tmpline" | grep "^[[:space:]]*initrd[[:space:]]\+")" ] ; then initrd_line=$line_num ; return 0; fi done fi done return 0 } boot_match() { if [ -e '/etc/gentoo-release' ] then return $ERR_OK fi grub_config if [ "$SYS" = "grub2" ] && [ -f /etc/redhat-release ] then if [ ! -f "$GRUBENV" ] then $Info "can not find $GRUBENV." return $ERR_FILE_NOEXIST fi match=$(cat /boot/grub2/grubenv | grep -w `uname -r`) if [ -z "$match" ] then if [ -f /etc/euleros-release ] && [ "`cat /etc/euleros-release`" == "EulerOS V2.0" ] then return $ERR_OK fi $Info "the kernel is $kernel,do not match." return $ERR_NG fi return $ERR_OK fi if [ "$SYS" = "elilo" ] then info_boot_entry_uefi "$MENULIST" "" elif [ "$SYS" = "grub2" ] then # not support openSUSE 13.2 if [ $(echo -n ${kernel} |grep "3.16.6-2") ] then return $ERR_OK fi info_boot_entry_grub2 "$MENULIST" "$GRUBENV" else info_boot_entry "$MENULIST" "" fi if [ -z "$initrd_line" ] then $Info "initrd_line is null." return $ERR_NG fi $Info "initrd_line is $initrd_line." line=$(sed -n "${initrd_line}p" $MENULIST) $Info "line is $line." match=$(echo -n "$line" | grep "$kernel") if [ -z "$match" ] then $Info "the kernel is $kernel,do not match $line." return $ERR_NG fi return $ERR_OK } lib_match() { if [ ! -d "/lib/modules/`uname -r`" ] then $Info "can not find the kernel directory." return $ERR_NG fi return $ERR_OK } main() { kernel=$(uname -r) if ( lib_match && boot_match ) then return $ERR_OK else return $ERR_NG fi } main UVP-Tools-2.2.0.316/bin/GuestOSFeature000066400000000000000000000146301314037446600171470ustar00rootroot00000000000000##################################################################### #first column is GuestOS type name, #second column is the GuestOS support feature bitmap. 
# #current feature bitmap: #memory hotplug 1 #online detach-disk 2 #vcpu hotplug 4 ##################################################################### #GuestOS Feature #suse suse10_sp1_x86 0 suse10_sp1_x86_64 0 suse10_sp2_x86 0 suse10_sp2_x86_64 0 suse10_sp3_x86 0 suse10_sp3_x86_64 0 suse10_sp4_x86 0 suse10_sp4_x86_64 0 suse11_sp0_x86 6 suse11_sp0_x86_64 7 suse11_sp1_x86 6 suse11_sp1_x86_64 7 suse11_sp2_x86 2 suse11_sp2_x86_64 7 suse11_sp3_x86 2 suse11_sp3_x86_64 2 opensuse_11.3_x86_64 0 opensuse_13.2_x86_64 0 suse10_sp2_customized_x86 0 suse10_sp3_customized_x86_64 0 suse10_sp4_customized_x86_64 0 suse11_sp1_customized_v1_x86_64 0 suse11_sp1_customized_v2_x86_64 0 suse11_sp1_customized_v3_x86_64 0 suse11_sp2_customized_v1_x86_64 0 suse11_sp2_customized_v2_x86_64 0 suse11_sp2_customized_v3_x86_64 0 suse11_sp2_customized_v4_x86_64 0 suse12_x86_64 0 #fedora fedora6_x86 0 fedora6_x86_64 0 fedora8_x86 0 fedora8_x86_64 0 fedora9_x86 0 fedora12_x86 4 fedora14_x86_64 0 fedora22_x86_64 0 fedora23_x86_64 0 #oracle oracle5.7_x86_64 1 oracle5.8_x86 0 oracle5.8_x86_64 0 oracle6.1_x86 0 oracle6.1_x86_64 0 oracle6.2_x86 0 oracle6.2_x86_64 0 oracle6.3_x86 0 oracle6.3_x86_64 0 oracle6.4_x86 0 oracle6.4_x86_64 0 oracle6.5_x86 0 oracle6.5_x86_64 0 #redhat && centos redhat4.2_x86_64 0 redhat4.4_x86 0 redhat4.4_x86_64 0 redhat4.5_x86 0 redhat4.5_x86_64 0 redhat4.6_x86 0 redhat4.6_x86_64 0 redhat4.7_x86 0 redhat4.7_x86_64 0 redhat4.8_x86 0 redhat4.8_x86_64 0 redhat5.0_x86 2 redhat5.0_x86_64 2 redhat5.1_x86 2 redhat5.1_x86_64 2 redhat5.2_x86 2 redhat5.2_x86_64 2 redhat5.3_x86 2 redhat5.3_x86_64 3 redhat5.4_x86 2 redhat5.4_x86_64 3 redhat5.5_x86 2 redhat5.5_x86_64 7 redhat5.6_x86 2 redhat5.6_x86_64 7 redhat5.7_x86 2 redhat5.7_x86_64 3 redhat5.8_x86 2 redhat5.8_x86_64 3 redhat5.9_x86 2 redhat5.9_x86_64 2 redhat6.0_x86 2 redhat6.0_x86_64 7 redhat6.1_x86 2 redhat6.1_x86_64 7 redhat6.2_x86 2 redhat6.2_x86_64 7 redhat6.3_x86 2 redhat6.3_x86_64 7 redhat6.4_x86 2 redhat6.4_x86_64 7 redhat6.5_x86 2 redhat6.5_x86_64 7 redhat7.0_x86_64 0 kylin3.0_x86_64 0 #centos centos5.5_x86_64 3 centos5.6_x86_64 7 centos5.9_x86 2 centos5.9_x86_64 2 centos6.0_x86_64 3 centos6.1_x86_64 3 centos6.2_x86_64 7 centos6.3_x86_64 7 centos6.4_x86_64 3 centos6.5_x86_64 3 #debian debian5.0.10_x86_64 0 debian6_x86 0 debian6_x86_64 0 debian7_x86 0 debian7_x86_64 0 debian8_x86_64 0 #ubuntu ubuntu8.04_x86_64 4 ubuntu8.04.4_x86_64 0 ubuntu9.10_x86 0 ubuntu9.10_x86_64 0 ubuntu10.04_server_x86_64 0 ubuntu10.04_desktop_x86_64 4 ubuntu10.04.1_x86 0 ubuntu10.04.1_x86_64 0 ubuntu10.04.2_x86_64 0 ubuntu10.04.3_x86_64 0 ubuntu10.04.4_x86_64 0 ubuntu10.10_x86_64 0 ubuntu11.04_x86 0 ubuntu11.04_x86_64 0 ubuntu11.10_x86 0 ubuntu11.10_x86_64 0 ubuntu12.04_x86_64 0 ubuntu12.04.1_x86_64 0 ubuntu12.04.2_x86 0 ubuntu12.04.2_x86_64 0 ubuntu12.04.3_x86 0 ubuntu12.04.3_x86_64 0 ubuntu12.04.4_x86 0 ubuntu12.04.4_x86_64 0 ubuntu12.10_x86 0 ubuntu12.10_x86_64 0 ubuntu13.04_x86 0 ubuntu13.04_x86_64 0 ubuntu13.10_x86 0 ubuntu13.10_x86_64 0 ubuntu14.04_x86 0 ubuntu14.04_x86_64 0 ubuntu14.04.1_x86 0 ubuntu14.04.1_x86_64 0 ubuntu10.04.2_customized_x86_64 0 ubuntu14.04.3_x86_64 0 #euleros euleros2.1_x86_64 0 gentoo_customized_x86 0 default_x86 0 default_x86_64 0 #end UVP-Tools-2.2.0.316/bin/get_uvp_kernel_modules000066400000000000000000000413421314037446600210430ustar00rootroot00000000000000#!/bin/bash SYS_TYPE=$1 KERN_RELEASE=$2 CPU_ARCH=$3 MACHINE_TYPE=$4 MODULES_PATH=`pwd`/lib/modules/xvf- GUEST_INFO="default" if [ -f "/etc/SuSE-release" ] && [ ! 
-f "/etc/EulerLinux.conf" ] then os_version=$(cat /etc/SuSE-release | awk -F" " 'NR==2 {print $3}') os_patchlevel=$(cat /etc/SuSE-release | awk -F" " 'NR==3 {print $3}') ker_rel_sysinfo=$(uname -r | awk -F"-" '{print $3}') OS_DIST_SVER="${SYS_TYPE}${os_version}sp${os_patchlevel}" fi case "$SYS_TYPE" in suse*) if [ "suse10sp1" = "${OS_DIST_SVER}" ]; then #suse10 sp1 KERN_RELEASE="2.6.16.46-0.12-${ker_rel_sysinfo}" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp1' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.29-bigsmp"`" ]; then #suse10 sp2 customized SYS_TYPE='SLES10' GUEST_INFO='suse10_sp2_customized' elif [ "suse10sp2" = "${OS_DIST_SVER}" ]; then #suse10 sp2 KERN_RELEASE="2.6.16.60-0.21-${ker_rel_sysinfo}" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.16.60-0.74.7"`" ] && [ "x86_64" = "${CPU_ARCH}" ] && [ "smp" = "${ker_rel_sysinfo}" ]; then #suse10 sp3 customized KERN_RELEASE="2.6.16.60-0.54.5-smp" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp3_customized' elif [ "suse10sp3" = "${OS_DIST_SVER}" ]; then #suse10 sp3 KERN_RELEASE="2.6.16.60-0.54.5-${ker_rel_sysinfo}" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.16.60-0.99.1"`" ] && [ "x86_64" = "${CPU_ARCH}" ] && [ "smp" = "${ker_rel_sysinfo}" ]; then #suse10 sp4 customized KERN_RELEASE="2.6.16.60-0.85.1-smp" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp4_customized' elif [ "suse10sp4" = "${OS_DIST_SVER}" ]; then #suse10 sp4 KERN_RELEASE="2.6.16.60-0.85.1-${ker_rel_sysinfo}" SYS_TYPE='SLES10' GUEST_INFO='suse10_sp4' elif [ "suse11sp0" = "${OS_DIST_SVER}" ]; then #suse11 sp0 KERN_RELEASE="2.6.27.19-5-${ker_rel_sysinfo}" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp0' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.54-0.3"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v1 SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1_customized_v1' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.3"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v2 KERN_RELEASE="2.6.32.59-0.3-default" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1_customized_v2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.7"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v3 KERN_RELEASE="2.6.32.59-0.7-default" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1_customized_v3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.17"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v3 KERN_RELEASE="2.6.32.59-0.7-default" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1_customized_v3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.19"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v3 KERN_RELEASE="2.6.32.59-0.7-default" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1_customized_v3' elif [ "suse11sp1" = "${OS_DIST_SVER}" ]; then #suse11 sp1 KERN_RELEASE="2.6.32.12-0.7-${ker_rel_sysinfo}" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp1' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.58"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11 sp2 customized v1 v2 SYS_TYPE='SLES11' if [ -n "`echo ${KERN_RELEASE} | grep "0.6.2"`" ]; then GUEST_INFO='suse11_sp2_customized_v1' elif [ -n "`echo ${KERN_RELEASE} | grep "0.6.6"`" ]; then GUEST_INFO='suse11_sp2_customized_v2' fi elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.80"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11 sp2 customized v3 SYS_TYPE='SLES11' GUEST_INFO='suse11_sp2_customized_v3' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.93-0.5"`" ]; then # suse11 sp2 customized v4 KERN_RELEASE="3.0.13-0.27-default" SYS_TYPE='SLES11' 
GUEST_INFO='suse11_sp2_customized_v4' elif [ "suse11sp2" = "${OS_DIST_SVER}" ];then # suse11 sp2 KERN_RELEASE="3.0.13-0.27-${ker_rel_sysinfo}" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp2' elif [ "suse11sp3" = "${OS_DIST_SVER}" ]; then # suse11 sp3 KERN_RELEASE="3.0.76-0.11-${ker_rel_sysinfo}" SYS_TYPE='SLES11' GUEST_INFO='suse11_sp3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.34"`" ]; then #OpenSUSE 11.3 SYS_TYPE='openSUSE11.3' GUEST_INFO='opensuse_11.3' elif [ -n "`echo ${KERN_RELEASE} | grep "3.16.6-2"`" ]; then #OpenSUSE 13.2 SYS_TYPE='openSUSE13.2' GUEST_INFO='opensuse_13.2' elif [ -n "`echo ${KERN_RELEASE} | grep "3.12.28"`" ]; then SYS_TYPE='SLES12' GUEST_INFO='suse12' fi ;; redhat*) dist_arch="`uname -r | awk -F"." '{print $4}'`" dist_bit="`uname -m`" if [ "CentOS" = "$(cat /etc/redhat-release | awk -F" " '{print $1}')" ] then sys_ver=$(cat /etc/redhat-release | awk -F" " '{print $3}') fi if [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-1.2798"`" ]; then #Fedora 6 SYS_TYPE='FEDORA6' if [ "i686" == ${dist_bit} -a -f "/boot/config-${KERN_RELEASE}" -a -n "`cat /boot/config-${KERN_RELEASE} | grep M586=y`" ];then KERN_RELEASE="2.6.18-1.2798.fc6.i586" fi GUEST_INFO='fedora6' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.23.1-42.fc8"`" ]; then SYS_TYPE='FEDORA8' GUEST_INFO='fedora8' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.25"`" ]; then #Fedora 9 SYS_TYPE='FEDORA9' GUEST_INFO='fedora9' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.31.5"`" ]; then #Fedora 12 SYS_TYPE='FEDORA12' GUEST_INFO='fedora12' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.35.6"`" ]; then #Fedora 14 SYS_TYPE='FEDORA14' GUEST_INFO='fedora14' elif [ -n "`echo ${KERN_RELEASE} | grep "4.0.4-301"`" ]; then #Fedora22_64 SYS_TYPE='FEDORA22' GUEST_INFO='fedora22' elif [ -n "`echo ${KERN_RELEASE} | grep "4.2.3-300"`" ]; then #Fedora23_64 SYS_TYPE='FEDORA23' GUEST_INFO='fedora23' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-22"`" ]; then #redhat 4.2 SYS_TYPE='RHEL4' GUEST_INFO='redhat4.2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9" | grep "AXS"`" ]; then #red flag asianux 2sp4 SYS_TYPE='RHEL4' KERN_RELEASE="2.6.9-89.${dist_arch}" GUEST_INFO='redflag2_sp4' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9"`" ]; then #redhat 4.4 4.5 4.6 4.7 4.8 if [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-42"`" ]; then GUEST_INFO='redhat4.4' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-55"`" ]; then GUEST_INFO='redhat4.5' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-67"`" ]; then GUEST_INFO='redhat4.6' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-78"`" ]; then GUEST_INFO='redhat4.7' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-89"`" ]; then GUEST_INFO='redhat4.8' fi SYS_TYPE='RHEL4' KERN_RELEASE="2.6.9-42.${dist_arch}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-SKL1.9.4.ky3.173.4.1"`" ]; then #中标麒麟 3.0 SYS_TYPE='RHEL5' GUEST_INFO='kylin3.0' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18.*.AXS"`" ]; then #Red Flag 3 sp2 sp3 if [ -n "`echo ${KERN_RELEASE} | grep "128.7"`" ]; then GUEST_INFO='redflag3_sp2' elif [ -n "`echo ${KERN_RELEASE} | grep "194.1"`" ]; then GUEST_INFO='redflag3_sp3' fi SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-8.el5" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-194"`" ]; then if [ "i686" == ${dist_bit} ]; then #redhat 5.3 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-128.${dist_arch}" else #redhat 5.5 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-194.${dist_arch}" fi if [ "5.5" = "${sys_ver}" ] then GUEST_INFO='centos5.5' else GUEST_INFO='redhat5.5' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-[0-9]\{1,2\}\."`" ]; then 
#redhat 5.0 5.1 5.2 if [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-8"`" ]; then GUEST_INFO='redhat5.0' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-53"`" ]; then GUEST_INFO='redhat5.1' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-92"`" ]; then GUEST_INFO='redhat5.2' fi SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-8.${dist_arch}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-[0-9]\{3\}\."`" ]; then #redhat 5.3 5.4 5.6 5.7 5.8 5.9 if [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-128"`" ]; then GUEST_INFO='redhat5.3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-164"`" ]; then GUEST_INFO='redhat5.4' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-238"`" ]; then if [ "5.6" = "${sys_ver}" ] then GUEST_INFO='centos5.6' else GUEST_INFO='redhat5.6' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-274"`" ]; then GUEST_INFO='redhat5.7' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-308"`" ]; then GUEST_INFO='redhat5.8' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-348"`" ]; then if [ "5.9" = "${sys_ver}" ] then GUEST_INFO='centos5.9' else GUEST_INFO='redhat5.9' fi fi SYS_TYPE='RHEL5' if [ -n "`uname -r | grep el5PAE`" ]; then KERN_RELEASE="2.6.18-128.el5PAE" else KERN_RELEASE="2.6.18-128.el5" fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-200.24.1.el6uek"`" ]; then #Oracle 6.3 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-200.24.1.el6uek.${dist_bit}" GUEST_INFO='oracle6.3' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-300.10.1.el5uek"`" ]; then #Oracle 5.8 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.32-300.10.1.el5uek" GUEST_INFO='oracle5.8' elif [ -n "`echo ${KERN_RELEASE} | grep "el5uek"`" ]; then #Oracle 5.7 5.8 if [ -n "`echo ${KERN_RELEASE} | grep "200.13.1"`" ]; then GUEST_INFO='oracle5.7' elif [ -n "`echo ${KERN_RELEASE} | grep "300.10.1"`" ]; then GUEST_INFO='oracle5.8' fi SYS_TYPE='RHEL5' KERN_RELEASE="2.6.32-200.13.1.el5uek" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-300.3.1"`" ]; then #Oracle 6.2 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-300.3.1.el6uek.${dist_bit}" GUEST_INFO='oracle6.2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-400.17.1.el6uek"`" ]; then #Oracle 6.4 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-400.17.1.el6uek.${dist_bit}" GUEST_INFO='oracle6.4' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-400.211.1.el6uek"`" ]; then #Oracle 6.5 x86 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-400.211.1.el6uek.${dist_bit}" GUEST_INFO='oracle6.5' elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.13-16.2.1.el6uek"`" ]; then ##Oracle 6.5 x86_64 SYS_TYPE='RHEL6' KERN_RELEASE="3.8.13-16.2.1.el6uek.${dist_bit}" GUEST_INFO='oracle6.5' elif [ -n "`echo ${KERN_RELEASE} | grep "el6uek"`" ]; then #Oracle 6.1 6.4 if [ -n "`echo ${KERN_RELEASE} | grep "100.34.1"`" ]; then GUEST_INFO='oracle6.1' elif [ -n "`echo ${KERN_RELEASE} | grep "400.17.1"`" ]; then GUEST_INFO='oracle6.4' fi SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-100.34.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-71"`" ]; then #redhat 6.0 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-71.el6.${dist_bit}" if [ "6.0" = "${sys_ver}" ] then GUEST_INFO='centos6.0' else GUEST_INFO='redhat6.0' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-279.2.1.el6.i686"`" ]; then #RedFlag 4 sp2 x86 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-279.el6.i686" GUEST_INFO='redflag4_sp2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-131.0.15"`" ]; then #redhat 6.1 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-131.0.15.el6.${dist_bit}" if [ "6.1" = "${sys_ver}" ] then GUEST_INFO='centos6.1' else GUEST_INFO='redhat6.1' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-220"`" ]; 
then #redhat 6.2 中标麒麟 6.0 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-220.el6.${dist_bit}" if [ "6.2" = "${sys_ver}" ] then GUEST_INFO='centos6.2' else GUEST_INFO='redhat6.2' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-279"`" ]; then #redhat 6.3 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-279.el6.${dist_bit}" if [ "6.3" = "${sys_ver}" ] then GUEST_INFO='centos6.3' else GUEST_INFO='redhat6.3' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-358"`" ]; then #redhat 6.4 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-358.el6.${dist_bit}" if [ "6.4" = "${sys_ver}" ] then GUEST_INFO='centos6.4' else GUEST_INFO='redhat6.4' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32"`" ]; then #redhat 6.5 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-131.0.15.el6.${dist_bit}" if [ "6.5" = "${sys_ver}" ] then GUEST_INFO='centos6.5' else GUEST_INFO='redhat6.5' fi elif [ -f /etc/euleros-release -a -n "`echo ${KERN_RELEASE} | grep "3.10.0-229"`" ]; then #euleros2.1 SYS_TYPE='RHEL7' KERN_RELEASE="3.10.0-123.el7.${dist_bit}" GUEST_INFO='euleros2.1' elif [ -n "`echo ${KERN_RELEASE} | grep "3.10.0"`" ]; then #redhat 7.0 SYS_TYPE='RHEL7' KERN_RELEASE="3.10.0-123.el7.${dist_bit}" GUEST_INFO='redhat7.0' fi ;; debian*) if [ -n "`echo ${KERN_RELEASE} | grep "2.6.26-2"`" ]; then #Debian 5.0.10 SYS_TYPE="Debian5" GUEST_INFO='debian5.0.10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-5"`" ]; then #Debian 6 SYS_TYPE="Debian6" GUEST_INFO='debian6' elif [ -n "`echo ${KERN_RELEASE} | grep "3.2.0-4"`" ]; then #Debian 7 SYS_TYPE="Debian7" GUEST_INFO='debian7' elif [ -n "`echo ${KERN_RELEASE} | grep "3.16.0-4"`" ]; then #Debian 8.2 SYS_TYPE="Debian8" GUEST_INFO='debian8' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.24"`" ]; then #Ubuntu 8.04 SYS_TYPE="Ubuntu8" if [ -n "`echo ${KERN_RELEASE} | grep "2.6.24-16"`" ]; then GUEST_INFO='ubuntu8.04' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.24-26"`" ]; then GUEST_INFO='ubuntu8.04.4' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.31-14"`" ]; then #Ubuntu 9.10 SYS_TYPE="Ubuntu9" GUEST_INFO='ubuntu9.10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.38-16-generic"`" ]; then #Ubuntu 10.04.2 customized SYS_TYPE="Ubuntu10" GUEST_INFO='ubuntu10.04.2_customized' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32"`" ]; then #Ubuntu 10.04 and 10.04.1 and 10.04.2 and 10.04.3 SYS_TYPE="Ubuntu10" if [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-21"`" ]; then GUEST_INFO='ubuntu10.04_server' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-38"`" ]; then GUEST_INFO='ubuntu10.04_desktop' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-24"`" ]; then GUEST_INFO='ubuntu10.04.1' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-28"`" ]; then GUEST_INFO='ubuntu10.04.2' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-33"`" ]; then GUEST_INFO='ubuntu10.04.3' fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.35"`" ]; then #Ubuntu 10.10 SYS_TYPE="Ubuntu10" GUEST_INFO='ubuntu10.10' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.0-15"`" ]; then #Ubuntu 10.04.4 SYS_TYPE="Ubuntu10" GUEST_INFO='ubuntu10.04.4' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.0"`" ]; then #Ubuntu 11.10 SYS_TYPE="Ubuntu11" GUEST_INFO='ubuntu11.10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.38-8"`" ]; then #Ubuntu 11.04 SYS_TYPE="Ubuntu11" GUEST_INFO='ubuntu11.04' elif [ -n "`echo ${KERN_RELEASE} | grep "3.2.0"`" ]; then #Ubuntu 12.04 and 12.04.1 SYS_TYPE="Ubuntu12" if [ -n "`echo ${KERN_RELEASE} | grep "3.2.0-23"`" ]; then GUEST_INFO='ubuntu12.04' elif [ -n "`echo ${KERN_RELEASE} | grep "3.2.0-29"`" ]; then GUEST_INFO='ubuntu12.04.1' fi elif [ -n "`echo 
${KERN_RELEASE} | grep "3.5.0"`" ]; then #Ubuntu 12.04.2 and 12.10 SYS_TYPE="Ubuntu12" if [ -n "`echo ${KERN_RELEASE} | grep "3.5.0-23"`" ]; then GUEST_INFO='ubuntu12.04.2' elif [ -n "`echo ${KERN_RELEASE} | grep "3.5.0-17"`" ]; then GUEST_INFO='ubuntu12.10' fi elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.0-29"`" ]; then #Ubuntu 12.04.3 SYS_TYPE="Ubuntu12" GUEST_INFO='ubuntu12.04.3' elif [ -n "`echo ${KERN_RELEASE} | grep "3.11.0-15"`" ]; then #Ubuntu 12.04.4 SYS_TYPE="Ubuntu12" GUEST_INFO='ubuntu12.04.4' elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.0"`" ]; then #Ubuntu 13.04 SYS_TYPE="Ubuntu13" GUEST_INFO='ubuntu13.04' elif [ -n "`echo ${KERN_RELEASE} | grep "3.11.0-12"`" ]; then #Ubuntu 13.10 SYS_TYPE="Ubuntu13" GUEST_INFO='ubuntu13.10' elif [ -n "`echo ${KERN_RELEASE} | grep "3.13.0-24"`" ]; then #Ubuntu 14.04 SYS_TYPE="Ubuntu14" GUEST_INFO='ubuntu14.04' elif [ -n "`echo ${KERN_RELEASE} | grep "3.13.0-32"`" ]; then SYS_TYPE="Ubuntu14" GUEST_INFO='ubuntu14.04.1' elif [ -n "`echo ${KERN_RELEASE} | grep "3.19.0-25"`" ]; then #Ubuntu 14.04.3 SYS_TYPE="Ubuntu14" GUEST_INFO='ubuntu14.04.3' fi ;; gentoo*) if [ -n "`echo ${KERN_RELEASE} | grep -w "3.9.0"`" ]; then #Gentoo customized SYS_TYPE="Gentoo" GUEST_INFO='gentoo_customized' fi ;; esac cat > "./CurrentOS" << EOF ${GUEST_INFO}_${MACHINE_TYPE} EOF echo "$MODULES_PATH$KERN_RELEASE-$CPU_ARCH-$SYS_TYPE" UVP-Tools-2.2.0.316/bin/mem_online.sh000066400000000000000000000011141314037446600170260ustar00rootroot00000000000000#!/bin/bash MEM_DEVTREE_PATH="/sys/devices/system/memory" if [ ! -d ${MEM_DEVTREE_PATH} ] then echo "cannot access ${MEM_DEVTREE_PATH}: No such file or directory" exit 1 fi for mem_index in ${MEM_DEVTREE_PATH}/memory* do state=$(grep offline ${mem_index}/state) if [ -n "$state" ] then echo online > ${mem_index}/state echo "${mem_index}/state change to online." fi done allonline=$(grep offline ${MEM_DEVTREE_PATH}/*/state) if [ -z "$allonline" ] then echo "All hotplug memories have online." else echo "There are memories offline, please execute the script again." fi UVP-Tools-2.2.0.316/bin/modify_swappiness.sh000066400000000000000000000012461314037446600204550ustar00rootroot00000000000000#!/bin/bash log() { local level="$1" shift logger -p "daemon.$level" -- "$0:" "$@" || echo "$0 $@" >&2 & } if [ "$1" = "modify" ] then old=`cat /proc/sys/vm/swappiness` if [ "$old" = "1" ] then exit 0 fi cat /proc/sys/vm/swappiness > /etc/.uvp-monitor/swappiness_old echo 1 > /proc/sys/vm/swappiness new=`cat /proc/sys/vm/swappiness` log info "modify in suspend,swappiness is $new" else if [ ! -e "/etc/.uvp-monitor/swappiness_old" ] then exit 0 fi old=`cat /etc/.uvp-monitor/swappiness_old` echo $old > /proc/sys/vm/swappiness rm -rf /etc/.uvp-monitor/swappiness_old old=`cat /proc/sys/vm/swappiness` log info "restore in resume,swappiness is $old" fi UVP-Tools-2.2.0.316/bin/offline_copy_uvp_module000066400000000000000000000042551314037446600212170ustar00rootroot00000000000000#!/bin/bash CPUARCH='' UVPTOOLSPATH=`pwd` SYS_TYPE='' INSTALLER_DIR=${UVPTOOLSPATH} KERLIBMOD='' VMTOOLRVFPATH='' LCL_UVP_MODULES_PATH='' get_linux_cpu_arch() { if [ ! -f "/sbin/depmod" ] then echo "Can not determine linux cpu arch." exit 1 fi if [ -n "$(file /sbin/depmod | grep "64-bit")" -o -n "$(file /bin/kmod | grep "64-bit")" ] then CPUARCH="x86_64" elif [ -n "$(file /sbin/depmod | grep "32-bit")" -o -n "$(file /bin/kmod | grep "32-bit")" ] then CPUARCH="i686" else echo "Can not determine linux cpu arch 32 or 64 bit." 
exit 1 fi } get_linux_dist() { issue='/etc/issue' if [ -e '/etc/debian_version' -o -n "$(grep -i 'debian' $issue)" ] then SYS_TYPE='debian' elif [ -e '/etc/redhat-release' -o -n "$(grep -i 'red *hat' $issue)" ] then SYS_TYPE='redhat' elif [ -e '/etc/SuSE-release' -o -n "$(grep -i 'suse\|s\.u\.s\.e' $issue)" ] then SYS_TYPE='suse' else echo "cannot determine linux distribution" exit 1 fi } match_patchkernel_version() { KERN_RELEASE=$1 get_uvp_kernel_modules_bin="${INSTALLER_DIR}/bin/offline/offline_get_uvp_kernel_modules" LCL_UVP_MODULES_PATH=$(${get_uvp_kernel_modules_bin} "$SYS_TYPE" "$KERN_RELEASE" "$CPUARCH") echo $LCL_UVP_MODULES_PATH } kernel_libmodule_path() { kernelversion=$1 KERLIBMOD="/lib/modules/$kernelversion/updates/pvdriver" if [ ! -d ${KERLIBMOD} ];then mkdir -p ${KERLIBMOD} fi } copy_xvf_module() { UvptoolsModuleRvfPath=$1 UVPTOOLRVFPATH="${INSTALLER_DIR}/lib/modules/$UvptoolsModuleRvfPath" echo $UVPTOOLRVFPATH if [ -d "${UVPTOOLRVFPATH}" ];then cp -rf $UVPTOOLRVFPATH/* $KERLIBMOD else echo "Unsupported Linux Version." exit 1 fi } find_module() { get_linux_cpu_arch get_linux_dist for kernelversion in `ls /lib/modules/` do illegal_kern="`echo ${kernelversion} | awk -F"." '{print $1}'`" if [ "${illegal_kern}" = ^[0-9] ];then echo "illegal kernel" continue fi match_patchkernel_version ${kernelversion} VmtoolsModuleRvfPath="${LCL_UVP_MODULES_PATH}" echo $VmtoolsModuleRvfPath kernel_libmodule_path ${kernelversion} copy_xvf_module ${LCL_UVP_MODULES_PATH} for mod in $(find $KERLIBMOD -type f -name "*.ko") do echo $mod done done } find_module UVP-Tools-2.2.0.316/bin/offline_get_uvp_kernel_modules000066400000000000000000000244461314037446600225530ustar00rootroot00000000000000#!/bin/bash SYS_TYPE=$1 KERN_RELEASE=$2 CPU_ARCH=$3 MODULES_PATH=xvf- if [ -f "/etc/SuSE-release" ] && [ ! 
-f "/etc/EulerLinux.conf" ] then os_version=$(cat /etc/SuSE-release | awk -F" " 'NR==2 {print $3}') os_patchlevel=$(cat /etc/SuSE-release | awk -F" " 'NR==3 {print $3}') ker_rel_sysinfo=$(echo ${KERN_RELEASE} | awk -F"-" '{print $3}') OS_DIST_SVER="${SYS_TYPE}${os_version}sp${os_patchlevel}" fi case "$SYS_TYPE" in suse*) if [ "suse10sp1" = "${OS_DIST_SVER}" ]; then #suse10 sp1 KERN_RELEASE="2.6.16.46-0.12-${ker_rel_sysinfo}" SYS_TYPE='SLES10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.29-bigsmp"`" ]; then #suse10 sp2 customized SYS_TYPE='SLES10' elif [ "suse10sp2" = "${OS_DIST_SVER}" ]; then #suse10 sp2 KERN_RELEASE="2.6.16.60-0.21-${ker_rel_sysinfo}" SYS_TYPE='SLES10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.16.60-0.74.7"`" ] && [ "x86_64" = "${CPU_ARCH}" ] && [ "smp" = "${ker_rel_sysinfo}" ]; then #suse10 sp3 customized KERN_RELEASE="2.6.16.60-0.54.5-smp" SYS_TYPE='SLES10' elif [ "suse10sp3" = "${OS_DIST_SVER}" ]; then #suse10 sp3 KERN_RELEASE="2.6.16.60-0.54.5-${ker_rel_sysinfo}" SYS_TYPE='SLES10' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.16.60-0.99.1"`" ] && [ "x86_64" = "${CPU_ARCH}" ] && [ "smp" = "${ker_rel_sysinfo}" ]; then #suse10 sp4 customized KERN_RELEASE="2.6.16.60-0.85.1-smp" SYS_TYPE='SLES10' elif [ "suse10sp4" = "${OS_DIST_SVER}" ]; then #suse10 sp4 KERN_RELEASE="2.6.16.60-0.85.1-${ker_rel_sysinfo}" SYS_TYPE='SLES10' elif [ "suse11sp0" = "${OS_DIST_SVER}" ]; then #suse11 sp0 KERN_RELEASE="2.6.27.19-5-${ker_rel_sysinfo}" SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.54-0.3"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v1 SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.3"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v2 SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.7"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v3 SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32.59-0.17"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11sp1_customized_v3 KERN_RELEASE="2.6.32.59-0.7-default" SYS_TYPE='SLES11' elif [ "suse11sp1" = "${OS_DIST_SVER}" ]; then #suse11 sp1 KERN_RELEASE="2.6.32.12-0.7-${ker_rel_sysinfo}" SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.58"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11 sp2 customized v1 v2 SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.80"`" ] && [ "x86_64" = "${CPU_ARCH}" ]; then # suse11 sp2 customized v3 SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.93-0.5"`" ]; then # suse11 sp2 customized v4 KERN_RELEASE="3.0.13-0.27-default" SYS_TYPE='SLES11' elif [ "suse11sp2" = "${OS_DIST_SVER}" ];then # suse11 sp2 KERN_RELEASE="3.0.13-0.27-${ker_rel_sysinfo}" SYS_TYPE='SLES11' elif [ "suse11sp3" = "${OS_DIST_SVER}" ]; then # suse11 sp3 KERN_RELEASE="3.0.76-0.11-${ker_rel_sysinfo}" SYS_TYPE='SLES11' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.34"`" ]; then #OpenSUSE 11.3 SYS_TYPE='openSUSE11.3' elif [ -n "`echo ${KERN_RELEASE} | grep "3.16.6"`" ]; then #OpenSUSE 13.2 SYS_TYPE='openSUSE13.2' fi ;; redhat*) dist_arch="`echo ${KERN_RELEASE} | awk -F"." 
'{print $4}'`" dist_bit="${CPU_ARCH}" if [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-1.2798"`" ]; then #Fedora 6 SYS_TYPE='FEDORA6' if [ "i686" == ${dist_bit} -a -f "/boot/config-${KERN_RELEASE}" -a -n "`cat /boot/config-${KERN_RELEASE} | grep M586=y`" ];then KERN_RELEASE="2.6.18-1.2798.fc6.i586" fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.23.1-42.fc8"`" ]; then SYS_TYPE='FEDORA8' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.25"`" ]; then #Fedora 9 SYS_TYPE='FEDORA9' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.31.5"`" ]; then #Fedora 12 SYS_TYPE='FEDORA12' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.35.6"`" ]; then #Fedora 14 SYS_TYPE='FEDORA14' elif [ -n "`echo ${KERN_RELEASE} | grep "4.0.4-301"`" ]; then #Fedora22_64 SYS_TYPE='FEDORA22' elif [ -n "`echo ${KERN_RELEASE} | grep "4.2.3-300"`" ]; then #Fedora23_64 SYS_TYPE='FEDORA23' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9-22"`" ]; then #redhat 4.2 SYS_TYPE='RHEL4' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9" | grep "AXS"`" ]; then #red flag asianux 2sp4 SYS_TYPE='RHEL4' KERN_RELEASE="2.6.9-89.${dist_arch}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.9"`" ]; then #redhat 4.4 4.5 4.6 4.7 4.8 SYS_TYPE='RHEL4' KERN_RELEASE="2.6.9-42.${dist_arch}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-SKL1.9.4.ky3.173.4.1"`" ]; then #中标麒麟 3.0 SYS_TYPE='RHEL5' elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18.*.AXS"`" ]; then #Red Flag 3 sp2 sp3 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-8.el5" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-194"`" ]; then if [ "i686" == ${dist_bit} ]; then #redhat 5.3 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-128.${dist_arch}" else #redhat 5.5 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-194.${dist_arch}" fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-[0-9]\{1,2\}\."`" ]; then #redhat 5.0 5.1 5.2 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.18-8.${dist_arch}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.18-[0-9]\{3\}\."`" ]; then #redhat 5.3 5.4 5.6 5.7 5.8 SYS_TYPE='RHEL5' if [ -n "`echo ${KERN_RELEASE} | grep "el5PAE"`" ]; then KERN_RELEASE="2.6.18-128.el5PAE" else KERN_RELEASE="2.6.18-128.el5" fi elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-200.24.1.el6uek"`" ]; then #Oracle 6.3 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-200.24.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-300.10.1.el5uek"`" ]; then #Oracle 5.8 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.32-300.10.1.el5uek" elif [ -n "`echo ${KERN_RELEASE} | grep "el5uek"`" ]; then #Oracle 5.7 5.8 SYS_TYPE='RHEL5' KERN_RELEASE="2.6.32-200.13.1.el5uek" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-300.3.1"`" ]; then #Oracle 6.2 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-300.3.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-400.17.1.el6uek"`" ]; then #Oracle 6.4 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-400.17.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.39-400.211.1.el6uek"`" ]; then #Oracle 6.5 x86 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.39-400.211.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.13-16.2.1.el6uek"`" ]; then ##Oracle 6.5 x86_64 SYS_TYPE='RHEL6' KERN_RELEASE="3.8.13-16.2.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "el6uek"`" ]; then #Oracle 6.1 6.4 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-100.34.1.el6uek.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-71"`" ]; then #redhat 6.0 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-71.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-279.2.1.el6.i686"`" ]; then #RedFlag 4 sp2 x86 SYS_TYPE='RHEL6' 
KERN_RELEASE="2.6.32-279.el6.i686" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-131.0.15"`" ]; then #redhat 6.1 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-131.0.15.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-220"`" ]; then #redhat 6.2 中标麒麟 6.0 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-220.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-279"`" ]; then #redhat 6.3 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-279.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-358"`" ]; then #redhat 6.4 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-358.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32"`" ]; then #redhat 6.5 SYS_TYPE='RHEL6' KERN_RELEASE="2.6.32-131.0.15.el6.${dist_bit}" elif [ -n "`echo ${KERN_RELEASE} | grep "3.10.0"`" ]; then #redhat 7.0 SYS_TYPE='RHEL7' KERN_RELEASE="3.10.0-123.el7.${dist_bit}" fi ;; debian*) if [ -n "`echo ${KERN_RELEASE} | grep "2.6.26-2"`" ]; then #Debian 5.0.10 SYS_TYPE="Debian5" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32-5"`" ]; then #Debian 6 SYS_TYPE="Debian6" elif [ -n "`echo ${KERN_RELEASE} | grep "3.2.0-4"`" ]; then #Debian 7 SYS_TYPE="Debian7" elif [ -n "`echo ${KERN_RELEASE} | grep "3.16.0-4"`" ]; then #Debian 8.2 SYS_TYPE="Debian8" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.24"`" ]; then #Ubuntu 8.04 SYS_TYPE="Ubuntu8" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.31-14"`" ]; then #Ubuntu 9.10 SYS_TYPE="Ubuntu9" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.38-16-generic"`" ]; then #Ubuntu 10.04.2 customized SYS_TYPE="Ubuntu10" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.32"`" ]; then #Ubuntu 10.04 and 10.04.1 and 10.04.2 and 10.04.3 SYS_TYPE="Ubuntu10" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.35"`" ]; then #Ubuntu 10.10 SYS_TYPE="Ubuntu10" elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.0-15"`" ]; then #Ubuntu 10.04.4 SYS_TYPE="Ubuntu10" elif [ -n "`echo ${KERN_RELEASE} | grep "3.0.0"`" ]; then #Ubuntu 11.10 SYS_TYPE="Ubuntu11" elif [ -n "`echo ${KERN_RELEASE} | grep "2.6.38-8"`" ]; then #Ubuntu 11.04 SYS_TYPE="Ubuntu11" elif [ -n "`echo ${KERN_RELEASE} | grep "3.2.0"`" ]; then #Ubuntu 12.04 and 12.04.1 SYS_TYPE="Ubuntu12" elif [ -n "`echo ${KERN_RELEASE} | grep "3.5.0"`" ]; then #Ubuntu 12.04.2 and 12.10 SYS_TYPE="Ubuntu12" elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.0-29"`" ]; then #Ubuntu 12.04.3 SYS_TYPE="Ubuntu12" elif [ -n "`echo ${KERN_RELEASE} | grep "3.11.0-15"`" ]; then #Ubuntu 12.04.4 SYS_TYPE="Ubuntu12" elif [ -n "`echo ${KERN_RELEASE} | grep "3.8.0"`" ]; then #Ubuntu 13.04 SYS_TYPE="Ubuntu13" elif [ -n "`echo ${KERN_RELEASE} | grep "3.11.0-12"`" ]; then #Ubuntu 13.10 SYS_TYPE="Ubuntu13" elif [ -n "`echo ${KERN_RELEASE} | grep "3.13.0-24"`" ]; then #Ubuntu 14.04 SYS_TYPE="Ubuntu14" elif [ -n "`echo ${KERN_RELEASE} | grep "3.13.0-32"`" ]; then SYS_TYPE="Ubuntu14" elif [ -n "`echo ${KERN_RELEASE} | grep "3.19.0-25"`" ]; then #Ubuntu 14.04.3 SYS_TYPE="Ubuntu14" fi ;; gentoo*) if [ -n "`echo ${KERN_RELEASE} | grep -w "3.9.0"`" ]; then #Gentoo customized SYS_TYPE="Gentoo" fi ;; esac echo "$MODULES_PATH$KERN_RELEASE-$CPU_ARCH-$SYS_TYPE" UVP-Tools-2.2.0.316/bin/pvumount.sh000066400000000000000000000036531314037446600166130ustar00rootroot00000000000000#!/bin/bash devname=$1 flag=0 Info() { echo "`date` $1" >>/var/log/unplug.log } Info "begin to umount ${devname}" if [ -z ${devname} ]; then exit 1 fi #TODO:xvdaa xvdab parts=`mount |grep /dev/${devname} |awk '{print $1}'` for part in ${parts} do count=0 while [ 1 ] do count=`expr ${count} + 1` fuser -k -m ${part} ret=$? 
Info "kill ${part} process: ${ret}" umount -l ${part} ret=$? Info "umount -l ${part}: ${ret}" if [ $ret = 0 ]; then break fi if [ ${count} = 3 ]; then flag=1 break fi done done #handle logic volume vges=`pvs|grep /dev/${devname}|awk '{print $2}'` #symbol name may start with 'hd' or 'sd' if [ "${vges}" = "" ]; then devsymbol="hd${devname:3}" vges=`pvs|grep /dev/${devsymbol}|awk '{print $2}'` fi if [ "${vges}" = "" ]; then devsymbol="sd${devname:3}" vges=`pvs|grep /dev/${devsymbol}|awk '{print $2}'` fi for vg in ${vges} do lves=`lvs |awk '$2=="'$vg'" {print $1}'` for lv in ${lves} do count=0 lvname="/dev/mapper/${vg}-${lv}" echo "${lvname}" mountpoint=`mount | awk '$1=="'${lvname}'" {print $1}'` if [ "${mountpoint}" = "" ]; then break fi while [ 1 ] do count=`expr ${count} + 1` fuser -k -m /dev/mapper/${vg}-${lv} ret=$? Info "kill /dev/mapper/${vg}-${lv} process: ${ret}" umount -l /dev/mapper/${vg}-${lv} ret=$? Info "umount -l /dev/mapper/${vg}-${lv}: ${ret}" if [ $ret = 0 ]; then break fi if [ ${count} = 3 ]; then flag=1 break fi done done vgchange -an ${vg} done Info "end of umount ${devname}: ${flag}" Info "================================================" exit ${flag} UVP-Tools-2.2.0.316/bin/uvp-config-mouse000066400000000000000000000121021314037446600174750ustar00rootroot00000000000000#!/bin/bash ####################################################################################### # 2010-7-6 chentao # Description: If VM is configured with the tablet usb parameter, then check # the configuration of the event number for the usb mouse in the # /etc/X11/xorg.conf and make some changes based on specific conditions. # If VM is configured with the mouse ps2 parameter or this script is # uninstalled with PV Driver, then revert the conf to the original state. ####################################################################################### XORG_CONF='/etc/X11/xorg.conf' INPUT_DEVICES='/proc/bus/input/devices' if [ ! -f "${XORG_CONF}" -o ! -f "${INPUT_DEVICES}" ] then exit 0 fi ############################################################################### # Config File Editor ############################################################################### ### modification operations MARKER_BEGIN='###pvdriver' MARKER_END='###pvdriver' MARKER_COMMENT='###pvdriver#' MARKER_WARNING=' do not change this comment' insert_block() { object_file=$1 anchor_address=$2 block_text=$3 test ! -f "$object_file" && return 2 sed_cmd='a' ### if [ "${anchor_address}" = '$' ] then anchor_address='$' elif [ "${anchor_address}" = '0' ] then anchor_address=1 sed_cmd='i' elif [ -z "$(echo "${anchor_address}" | sed -n '/^\s*[0-9]\+\s*$/p')" ] then anchor_address=$(echo "${anchor_address}" | sed 's/\//\\\//g') anchor_address="/${anchor_address}/" fi ### if [ -s "$object_file" ] then sed -i "${anchor_address}${sed_cmd}\ ${MARKER_BEGIN}${MARKER_WARNING}\n\ $(echo "$block_text" | sed ':a;N;s/\n/\\n/;ta')\n\ ${MARKER_END}${MARKER_WARNING}" "$object_file" else cat > "$object_file" << EOF ${MARKER_BEGIN}${MARKER_WARNING} $block_text ${MARKER_END}${MARKER_WARNING} EOF fi ret=$? if [ $ret = 0 ] then return $ret else abort "insert_block $object_file with ${anchor_address} failed." fi } remove_block() { object_file=$1 test ! 
-f "$object_file" && return 2 while :; do marker_begin_line=$(grep -n "${MARKER_BEGIN}.*" "${object_file}" | sed -n '1p' | awk -F ':' '{print $1}') marker_end_line=$(grep -n "${MARKER_END}.*" "${object_file}" | sed -n '1p' | awk -F ':' '{print $1}') test -z "${marker_begin_line}" -o -z "${marker_end_line}" && break if ! sed -i "${marker_begin_line},${marker_end_line}d" "${object_file}" then abort "remove_block $object_file failed." fi done } comment_on_line() { object_file=$1 anchor_address=$2 anchor_expreg=$3 test ! -f "$object_file" && return 2 ### if [ "${anchor_address}" = '$' ] then anchor_address='$' elif [ -z "$(echo "${anchor_address}" | sed -n '/^\s*[0-9]\+\s*$/p')" ] then anchor_address=$(echo "${anchor_address}" | sed 's/\//\\\//g') anchor_address="/${anchor_address}/" fi ### anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') if ! sed -i "${anchor_address}s;\(${anchor_expreg}.*\);${MARKER_COMMENT}\1;g" "$object_file" then abort "comment_on_line $object_file at ${anchor_expreg} failed." fi } comment_off_line() { object_file=$1 test ! -f "$object_file" && return 2 ### if ! sed -i "s;\(^\s*\)${MARKER_COMMENT}\(.*\);\1\2;g" "$object_file" then abort "comment_off_line $object_file at ${anchor_expreg} failed." fi } ############################################################################### # Config File Editor ############################################################################### ChangeXorg() { remove_block "${XORG_CONF}" comment_off_line "${XORG_CONF}" ### add UvpMouseEvent usb_tablet_line=$(sed -n '/QEMU\s*USB\s*Tablet/=' "${INPUT_DEVICES}" | sed -n '1p') usb_tablet_event=$(sed -n "${usb_tablet_line},\$s/H:\s*Handlers=\S\+\s*\(\S\+\).*/\1/p" "$INPUT_DEVICES" | sed -n '1p') insert_block "${XORG_CONF}" '$' "\ Section \"InputDevice\" Identifier \"UvpMouseEvent\" Driver \"evdev\" Option \"Device\" \"/dev/input/${usb_tablet_event}\" EndSection" #syntax highlight" ### replace ServerLayout server_layout_line=$(sed -n '/^\s*Section.*ServerLayout/=' "${XORG_CONF}" | sed -n '$p') core_pointer_line=$(sed -n "${server_layout_line},/^\s*InputDevice.*CorePointer.*/=" "$XORG_CONF" | sed -n '$p') comment_on_line "${XORG_CONF}" "${core_pointer_line}" ".*" insert_block "${XORG_CONF}" "${core_pointer_line}" " InputDevice \"UvpMouseEvent\" \"CorePointer\"" } RestoreXorg() { remove_block "${XORG_CONF}" comment_off_line "${XORG_CONF}" } OPT_ACTION='install' while getopts "u" option do case $option in u) OPT_ACTION='uninstall' ;; *) exit 1 ;; esac done if [ "$OPT_ACTION" = 'install' -a -n "$(sed -n '/QEMU/p' ${INPUT_DEVICES})" ] then ChangeXorg else RestoreXorg fi UVP-Tools-2.2.0.316/build_tools000066400000000000000000000320111314037446600160420ustar00rootroot00000000000000#!/bin/bash ############################################################################### ###### File name : build_tools ###### ###### Author : ###### ###### Description : To build the UVP Tools for Linux distribution. ###### ###### History : ###### ###### 2012-02-24: Create the file ###### ###### 2012-09-04: To support multi kernels in an OS, ###### ###### 2012-12-24: Rename this script ###### ###### build_xenpv -> build_tools ###### ###### 2013-01-19: To support the PV OPS VMs ###### ############################################################################### # UVP Tools name & version PACKAGE_NAME="uvp-tools-linux" UVP_TOOLS_VERSION="" # The target OS distribution in detail, such as rhel-5_series. OS_RELEASE="" OS_DIST="" OS_DIST_SRC="" # UVP-tools sources absolute path for linux. 
DEV_CUR_DIR="`pwd`" UVP_TOOLS_DIR="" UVP_TOOLS_VER="" CI_TAG_VER="2." # Linux tools sources path. DRV_SRC_DIR="`pwd`/uvp-xenpv" SRV_SRC_DIR="`pwd`/uvp-monitor" BIN_SRV="uvp-monitor" BIN_SO="libxenstore.so" KERN_VER="`uname -r`" CPU_ARCH="`uname -m`" SYS_BIT="" # Prompt BUILD_LOG="build_tools_" BUILD_WARN="[ Warning ] ---" BUILD_ERR="[ Error ] ---" OPT_PARAMETER="" case "$CPU_ARCH" in i[3456789]86|x86) SYS_BIT="32" ;; x86_64|amd64) SYS_BIT="64" ;; *) echo "$CPU_ARCH has been not supported!" exit esac # help usage() { echo "Usage: `basename $0` " echo " Build the PV Drivers and uvp-monitor. " echo " -b make all. " echo " -c make clean. " } # get_os_release() { if [ -f /etc/redhat-release ]; then if [ "Fedora" == `cat /etc/redhat-release | awk -F" " '{print $1}'` ]; then OS_RELEASE="Fedora" else OS_RELEASE="redhat" fi elif [ -f /etc/SuSE-release ]; then if [ "openSUSE" == `cat /etc/SuSE-release | awk -F" " 'NR==1 {print $1}'` ]; then OS_RELEASE="openSUSE" else OS_RELEASE="suse" fi elif [ -f /etc/debian_version ]; then OS_RELEASE="`cat /etc/issue | awk -F" " '{print $1}'`" elif [ -f /etc/gentoo-release ]; then OS_RELEASE="Gentoo" else OS_RELEASE="unknown" fi } get_os_distribution_info() { local ver_os_rel="" get_os_release if [ "redhat" == ${OS_RELEASE} ]; then if [ -n "`uname -r | grep 2.6.18-SKL1.9.4.ky3.173.4.1`" ]; then OS_DIST="RHEL5" OS_DIST_SRC="others/RHEL5" return fi ver_os_rel="`cat /etc/redhat-release | awk -F" " '{print $7}' | awk -F"." '{print $1}'`" OS_DIST="RHEL${ver_os_rel}" # For RHEL PV_OPS VMs which version had been greater than RHEL5. if [ "5" -lt "${ver_os_rel}" ]; then # For RHEL 6.0 VMs which version must be change manufacturer's kernel. if [ "32-71" == "`uname -r | awk -F'.' '{printf $3}'`" ]; then OS_DIST_SRC='others/Ubuntu10' else OS_DIST_SRC='uvp-pvops_xen_driver-2.6.32to4.0.x' fi else # For OEL PV_OPS VM which version is OEL5.7\OEL5.8. if [ "32-200" == "`uname -r | awk -F'.' '{printf $3}'`" ] || [ "32-300" == "`uname -r | awk -F'.' '{printf $3}'`" ]; then OS_DIST_SRC='uvp-pvops_xen_driver-2.6.32to4.0.x' else OS_DIST_SRC="others/${OS_DIST}" fi fi elif [ "Fedora" == ${OS_RELEASE} ]; then ver_os_rel="`cat /etc/redhat-release | awk -F" " '{print $3}'`" OS_DIST="FEDORA${ver_os_rel}" if [ "12" -lt "${ver_os_rel}" ]; then OS_DIST_SRC='others/Ubuntu10' elif [ -n "`uname -r | grep "2.6.18-1.2798"`" ]; then OS_DIST_SRC='others/RHEL5' OS_DIST="FEDORA6" else OS_DIST_SRC="others/${OS_DIST}" fi elif [ "suse" == ${OS_RELEASE} ]; then ver_os_rel="`cat /etc/SuSE-release | awk -F" " '{print $5}'`" OS_DIST="SLES${ver_os_rel}" if [ "SLES11" == ${OS_DIST} ]; then OS_DIST_SRC="uvp-classic_xen_driver-2.6.32to3.0.x" elif [ "SLES12" == ${OS_DIST} ]; then OS_DIST_SRC="uvp-classic_xen_driver-3.12.xto3.16.x" else OS_DIST_SRC="others/${OS_DIST}" fi if [ -n "`uname -r | grep "2.6.29-bigsmp"`" ]; then OS_DIST="SLES10" OS_DIST_SRC="others/Ubuntu10" fi elif [ "openSUSE" == ${OS_RELEASE} ]; then ver_os_rel="`cat /etc/SuSE-release | awk -F" " 'NR==1 {print $2}'`" OS_DIST="openSUSE${ver_os_rel}" OS_DIST_SRC='others/Ubuntu10' if [ -n "`echo ${kernel_uname_release} | grep "3.16.6-2"`" ]; then OS_DIST_SRC="uvp-classic_xen_driver-3.12.xto3.16.x" fi elif [ "Debian" == ${OS_RELEASE} ]; then ver_os_rel="`cat /etc/issue | awk -F" " '{print $3}' | awk -F"." '{print $1}'`" OS_DIST="Debian${ver_os_rel}" # For Debian PV_OPS VMs which version had been greater than Ubuntu10. 
if [ "6" -lt "${ver_os_rel}" ] || [ "6" -eq "${ver_os_rel}" -a -n "`uname -m | grep x86_64`" ]; then OS_DIST_SRC='uvp-pvops_xen_driver-2.6.32to4.0.x' else OS_DIST_SRC='others/Ubuntu10' fi elif [ "Ubuntu" == ${OS_RELEASE} ]; then if [ -n "`uname -r | grep 3.0.0-15`" ] || [ -n "`uname -r | grep 2.6.38-16-generic`" ]; then OS_DIST="Ubuntu10" OS_DIST_SRC="uvp-pvops_xen_driver-2.6.32to4.0.x" return fi if [ -n "`uname -r | grep 2.6.31-14`" ]; then OS_DIST="Ubuntu9" OS_DIST_SRC="others/Ubuntu10" return fi ver_os_rel="`cat /etc/issue | awk -F" " '{print $2}' | awk -F"." '{print $1}'`" OS_DIST="Ubuntu${ver_os_rel}" # For Ubuntu PV_OPS VMs which version had been greater than Ubuntu10. if [ "10" -lt "${ver_os_rel}" ]; then OS_DIST_SRC='uvp-pvops_xen_driver-2.6.32to4.0.x' else OS_DIST_SRC="others/${OS_DIST}" fi elif [ "Gentoo" == ${OS_RELEASE} ]; then OS_DIST="Gentoo" OS_DIST_SRC="uvp-pvops_xen_driver-2.6.32to4.0.x" else echo "Sorry, this Operating System could not support!" fi } build_xenpv() { local build_ops=$1 local kern_ver=$2 local kern_dir="/lib/modules/$kern_ver/build" local mod_dir="${UVP_TOOLS_DIR}/lib/modules/xvf-${kern_ver}-${CPU_ARCH}-${OS_DIST}/" local mod_with_dir="" cd ${DRV_SRC_DIR}/${OS_DIST_SRC} if [ 'b' == ${build_ops} ]; then mkdir -p ${mod_dir} make all KERNDIR=${kern_dir} BUILDKERNEL=$(uname -r) CONFIG_DEBUG_SECTION_MISMATCH=y -s \ 2>> ${BUILD_LOG}${OS_DIST}_xenpv.log mod_with_dir="`find -name *.ko`" for mod_xenpv in ${mod_with_dir} do cp --parents ${mod_xenpv} ${mod_dir} done elif [ 'c' == ${build_ops} ]; then make clean KERNDIR=${kern_dir} BUILDKERNEL=$(uname -r) -s else echo "${BUILD_WARN} bad parameter, please show help." fi cd - } build_xenpv_alldist() { local build_ops=$1 local del_illegal_kern="" for kernel in `ls /lib/modules/` do # When in the RHEL4 should not build the illegal kernel. ex. kabi-4.0.0/kabi-4.0.0smp del_illegal_kern="`echo ${kernel} | awk -F"." '{print $1}'`" standard_kern="`uname -r | awk -F"." 
'{print $1}'`" if [ "${standard_kern}" != "${del_illegal_kern}" ]; then continue fi echo "Starting to compile ${kernel} in the ${OS_DIST}: " build_xenpv ${build_ops} ${kernel} build_xenpv 'c' ${kernel} done } hacking_driver_ver() { local mode_info=$1 local ver_file=$2 local base_ver_line=$(grep -n "MODULE_VERSION" ${ver_file} | head -1 | awk -F":" '{print $1}') local hack_kver=$(cat "${DRV_SRC_DIR}/${osdir}/version.ini" | grep -w KernModeVersion | awk -F"=" '{print $2}') local hack_uver=$(cat "${DRV_SRC_DIR}/${osdir}/version.ini" | grep -w UserModeVersion | awk -F"=" '{print $2}') if [ "X" = "X${base_ver_line}" ] then #MODULE_VERSION("1"); if [ "kern" = "${mode_info}" ] then echo "MODULE_VERSION(\"${hack_kver}\");" >> ${ver_file} else echo "MODULE_VERSION(\"${hack_uver}\");" >> ${ver_file} fi else base_ver=$(cat ${ver_file} | grep -w MODULE_VERSION | awk -F"\"" '{print $2}') if [ "kern" = "${mode_info}" ] then sed -i "${base_ver_line}s;${base_ver};${hack_kver};" ${ver_file} else sed -i "${base_ver_line}s;${base_ver};${hack_uver};" ${ver_file} fi fi } build_server() { local build_ops=$1 local srv_dir="${UVP_TOOLS_DIR}/usr/bin${SYS_BIT}" local srv_target="uvp-monitor-${SYS_BIT}" local tools_arp="arping" local tools_ndp="ndsend" mkdir -p ${srv_dir} cd ${SRV_SRC_DIR} if [ 'a' == ${build_ops} ] || [ 'b' == ${build_ops} ]; then make ${srv_target} 2>> ${BUILD_LOG}${OS_DIST}_monitor.log cp -f ${srv_target} ${srv_dir}/${BIN_SRV} cp -f ${tools_arp} ${srv_dir}/${tools_arp} cp -f ${tools_ndp} ${srv_dir}/${tools_ndp} elif [ 'c' == ${build_ops} ]; then make clean else echo "$BUILD_WARN bad parameter, please show help." exit fi cd - } get_init_scripts() { local dir_init_scripts="$UVP_TOOLS_DIR/etc" cp -af $SRV_SRC_DIR/monitor/* $dir_init_scripts } get_runtime_libs() { local dir_dynamic_libs="${UVP_TOOLS_DIR}/usr/lib" local mach_bit=`uname -m` local libpath="" if [ "x86_64" != "${mach_bit}" ]; then dir_dynamic_libs="${UVP_TOOLS_DIR}/usr/lib32" mach_bit="i386" libpath="/usr/lib" else dir_dynamic_libs="${UVP_TOOLS_DIR}/usr/lib64" libpath="/usr/lib64" fi mkdir -p ${dir_dynamic_libs} cp -af ./uvp-monitor/xen-4.1.2/tools/xenstore/$BIN_SO.3.0.0 ${dir_dynamic_libs}/$BIN_SO } get_kernel_modules() { local dir_support_scripts="$UVP_TOOLS_DIR/bin" local dir_offline_support="$UVP_TOOLS_DIR/bin/offline" cp -af $DEV_CUR_DIR/bin/get_uvp_kernel_modules $dir_support_scripts chmod 544 ${dir_support_scripts}/get_uvp_kernel_modules #package disk unplug script cp -af $DEV_CUR_DIR/bin/pvumount.sh $dir_support_scripts chmod 544 ${dir_support_scripts}/pvumount.sh #package CheckKernelUpdate script cp -af $DEV_CUR_DIR/bin/CheckKernelUpdate.sh $dir_support_scripts chmod 544 ${dir_support_scripts}/CheckKernelUpdate.sh #package mem online script cp -af $DEV_CUR_DIR/bin/mem_online.sh $dir_support_scripts chmod 544 ${dir_support_scripts}/mem_online.sh cp -af $DEV_CUR_DIR/bin/modify_swappiness.sh $dir_support_scripts cp -af $DEV_CUR_DIR/bin/GuestOSFeature $dir_support_scripts mkdir -p ${dir_offline_support} cp -af $DEV_CUR_DIR/bin/offline_get_uvp_kernel_modules $dir_offline_support chmod 544 ${dir_offline_support}/offline_get_uvp_kernel_modules cp -af $DEV_CUR_DIR/bin/offline_copy_uvp_module $dir_offline_support chmod 544 ${dir_offline_support}/offline_copy_uvp_module } build_tools() { local build_ops=$1 local ver_file="${DEV_CUR_DIR}/version.ini" get_os_distribution_info UVP_TOOLS_VER="`cat ${ver_file} | awk -F"=" 'NR==1 {print $2}'`" UVP_TOOLS_DIR="${DEV_CUR_DIR}/${PACKAGE_NAME}-${CI_TAG_VER}${UVP_TOOLS_VER}" mkdir -p ${UVP_TOOLS_DIR} 
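# (version.ini itself is not reproduced in this section of the archive; an illustrative
#  layout, with the key names taken from this script and assumed values, would be:
#      UVPToolsVersion=2.0.316    <- first line; CI_TAG_VER "2." is prepended, giving 2.2.0.316
#      DriverVersion=2.0.316-1    <- the part before "-" is spliced into the install script
#  The per-driver source trees carry their own version.ini with KernModeVersion and
#  UserModeVersion, which hacking_driver_ver() above writes into MODULE_VERSION().)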
mkdir -p $UVP_TOOLS_DIR/bin \ $UVP_TOOLS_DIR/config \ $UVP_TOOLS_DIR/etc 2>/dev/null build_xenpv_alldist ${build_ops} build_server ${build_ops} get_init_scripts get_runtime_libs get_kernel_modules ### # To compress the Linux PV Driver into an iso ### cp -r -f ${DEV_CUR_DIR}/config/ ${UVP_TOOLS_DIR} cp -f ${DEV_CUR_DIR}/install ${UVP_TOOLS_DIR} first_line="$(sed -n '/^\s*[^#]/=' ${UVP_TOOLS_DIR}/install | sed -n '1p')" version_info="$(cat ${ver_file} | grep UVPToolsVersion | sed 's;UVPToolsVersion;INSTALL_VERSION;g')" driver_info="$(cat ${ver_file} | grep DriverVersion | awk -F"-" '{print $1}' | sed 's;DriverVersion;DRIVER_VERSION;g')" sed -i "${first_line} i${version_info}\n${driver_info}" "${UVP_TOOLS_DIR}/install" chmod 744 ${UVP_TOOLS_DIR}/install find ${UVP_TOOLS_DIR} -name .svn | xargs rm -rf } clean_tools() { local build_ops=$1 build_server ${build_ops} rm -rf ${PACKAGE_NAME}* } for i in `ls` do ret=$(file $i | grep text) if [ -n "$ret" ] then dos2unix $i 2>/dev/null fi done while getopts "bc" options; do case $options in b) OPT_PARAMETER='b' build_tools $OPT_PARAMETER ;; c) OPT_PARAMETER='c' clean_tools $OPT_PARAMETER ;; *) usage exit 0 ;; esac done UVP-Tools-2.2.0.316/config/000077500000000000000000000000001314037446600150505ustar00rootroot00000000000000UVP-Tools-2.2.0.316/config/udev/000077500000000000000000000000001314037446600160135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/config/udev/60-xen-storage.rules000066400000000000000000000013051314037446600215450ustar00rootroot00000000000000### KERNEL=="xvd*", IMPORT{program}="%XEN_ID% $kernel" ###KERNEL=="xvdd", SYMLINK+="cdrom" KERNEL=="xvd*[!0-9]", ENV{EMU_SERIAL}=="?*", SYMLINK+="$env{EMU_KERNEL}" KERNEL=="xvd*[!0-9]", ENV{EMU_SERIAL}=="?*", SYMLINK+="disk/by-id/$env{EMU_BUS}-$env{EMU_MODEL}_$env{EMU_SERIAL}" KERNEL=="xvd*[!0-9]", ENV{EMU_BUS2}=="?*", SYMLINK+="disk/by-id/$env{EMU_BUS2}-$env{EMU_MODEL2}_$env{EMU_SERIAL}" KERNEL=="xvd*[0-9]", ENV{EMU_SERIAL}=="?*", SYMLINK+="$env{EMU_KERNEL}%n" KERNEL=="xvd*[0-9]", ENV{EMU_SERIAL}=="?*", SYMLINK+="disk/by-id/$env{EMU_BUS}-$env{EMU_MODEL}_$env{EMU_SERIAL}-part%n" KERNEL=="xvd*[0-9]", ENV{EMU_BUS2}=="?*", SYMLINK+="disk/by-id/$env{EMU_BUS2}-$env{EMU_MODEL2}_$env{EMU_SERIAL}-part%n" UVP-Tools-2.2.0.316/config/udev/60-xen-storage.rules.legacy000066400000000000000000000033241314037446600230130ustar00rootroot00000000000000### devices KERNEL="xvda", SYMLINK="hda" KERNEL="xvdb", SYMLINK="hdb" KERNEL="xvdc", SYMLINK="hdc" #KERNEL="xvdd", SYMLINK="cdrom" KERNEL="xvde", SYMLINK="hde" KERNEL="xvdf", SYMLINK="hdf" KERNEL="xvdg", SYMLINK="hdg" KERNEL="xvdh", SYMLINK="hdh" KERNEL="xvdi", SYMLINK="hdi" KERNEL="xvdj", SYMLINK="hdj" KERNEL="xvdk", SYMLINK="hdk" KERNEL="xvdl", SYMLINK="hdl" KERNEL="xvdm", SYMLINK="hdm" KERNEL="xvdn", SYMLINK="hdn" KERNEL="xvdo", SYMLINK="hdo" KERNEL="xvdp", SYMLINK="hdp" KERNEL="xvdq", SYMLINK="hdq" KERNEL="xvdr", SYMLINK="hdr" KERNEL="xvds", SYMLINK="hds" KERNEL="xvdt", SYMLINK="hdt" KERNEL="xvdu", SYMLINK="hdu" KERNEL="xvdv", SYMLINK="hdv" KERNEL="xvdw", SYMLINK="hdw" KERNEL="xvdx", SYMLINK="hdx" KERNEL="xvdy", SYMLINK="hdy" KERNEL="xvdz", SYMLINK="hdz" ### partitions KERNEL="xvda[0-9]*", SYMLINK="hda%n" KERNEL="xvdb[0-9]*", SYMLINK="hdb%n" KERNEL="xvdc[0-9]*", SYMLINK="hdc%n" #KERNEL="xvdd[0-9]*", SYMLINK="hdd%n" KERNEL="xvde[0-9]*", SYMLINK="hde%n" KERNEL="xvdf[0-9]*", SYMLINK="hdf%n" KERNEL="xvdg[0-9]*", SYMLINK="hdg%n" KERNEL="xvdh[0-9]*", SYMLINK="hdh%n" KERNEL="xvdi[0-9]*", SYMLINK="hdi%n" KERNEL="xvdj[0-9]*", SYMLINK="hdj%n" KERNEL="xvdk[0-9]*", 
SYMLINK="hdk%n" KERNEL="xvdl[0-9]*", SYMLINK="hdl%n" KERNEL="xvdm[0-9]*", SYMLINK="hdm%n" KERNEL="xvdn[0-9]*", SYMLINK="hdn%n" KERNEL="xvdo[0-9]*", SYMLINK="hdo%n" KERNEL="xvdp[0-9]*", SYMLINK="hdp%n" KERNEL="xvdq[0-9]*", SYMLINK="hdq%n" KERNEL="xvdr[0-9]*", SYMLINK="hdr%n" KERNEL="xvds[0-9]*", SYMLINK="hds%n" KERNEL="xvdt[0-9]*", SYMLINK="hdt%n" KERNEL="xvdu[0-9]*", SYMLINK="hdu%n" KERNEL="xvdv[0-9]*", SYMLINK="hdv%n" KERNEL="xvdw[0-9]*", SYMLINK="hdw%n" KERNEL="xvdx[0-9]*", SYMLINK="hdx%n" KERNEL="xvdy[0-9]*", SYMLINK="hdy%n" KERNEL="xvdz[0-9]*", SYMLINK="hdz%n" UVP-Tools-2.2.0.316/config/udev/xen.agent000066400000000000000000000002621314037446600176250ustar00rootroot00000000000000#!/bin/sh case "$ACTION" in add) devtype=$(cat /sys/$DEVPATH/devtype) # echo -ne "\r\n### $0 debug: modprobe xen:$devtype ###\r\n" >/dev/console modprobe xen:$devtype ;; esac UVP-Tools-2.2.0.316/config/udev/xen_id000066400000000000000000000040441314037446600172060ustar00rootroot00000000000000#!/bin/bash shopt -u expand_aliases ############################################################################### # main entrance ############################################################################### function abort { exit 1 } function finish { exit 0 } ### parse options while [ $OPTIND -le $# ] do while getopts "b:" option do case $option in b) PARAM_EMU_BUS="$OPTARG" ;; *) abort ;; esac done if [ $OPTIND -gt $# ] then break fi if [ -z $PARAM_DEV_NAME ] then eval PARAM_DEV_NAME=$`echo $OPTIND` OPTIND=$((OPTIND + 1)) else abort fi done [ -z "$PARAM_DEV_NAME" -o -z "$PARAM_EMU_BUS" ] && abort [ "$PARAM_DEV_NAME" = "xvdd" ] && finish PARAM_DEV_NAME=$(echo -n "$PARAM_DEV_NAME" | sed "s;[0-9]*$;;g") emu_type="disk" emu_bus="$PARAM_EMU_BUS" ### id_serial=$(echo "$PARAM_DEV_NAME" | sed -n '/^xvd[a-z]\+[0-9]*$/p' | sed "s;^xvd\([a-z]\+\)[0-9]*$;\1;g") [ -z "$id_serial" ] && abort if [ "$emu_bus" = "ata" ] then emu_kernel="hd${id_serial}" emu_model="QEMU_HARDDISK" elif [ "$emu_bus" = "scsi" ] then emu_kernel="sd${id_serial}" emu_model="SATA_QEMU_HARDDISK" else abort fi ### id_serial_num=0 alpha_scale=1 for n in `echo -n "$id_serial" | sed '/\n/!G;s/\(.\)\(.*\n\)/&\2\1/;//D;s/.//' | sed "s;\([a-z]\);\1 ;g"` do id_serial_num=$(( id_serial_num + (`printf "%d" "'$n"` - `printf "%d" "'a"` + 1) * alpha_scale)) alpha_scale=$((alpha_scale * 26)) done emu_serial="$(printf 'QM%05d' $id_serial_num)" ### export information printf "EMU_TYPE='%s' \n" $emu_type printf "EMU_MODEL='%s' \n" $emu_model printf "EMU_SERIAL='%s' \n" $emu_serial printf "EMU_BUS='%s' \n" $emu_bus printf "EMU_KERNEL='%s' \n" $emu_kernel if [ "$emu_bus" = "scsi" ] then printf "EMU_BUS2='%s' \n" $(echo -n "$emu_bus" | sed "s;scsi;ata;g") printf "EMU_MODEL2='%s' \n" $(echo -n "$emu_model" | sed "s;^SATA_;;g") else printf "EMU_BUS2='' \n" printf "EMU_MODEL2='' \n" fi UVP-Tools-2.2.0.316/cpfile.sh000066400000000000000000000172471314037446600154140ustar00rootroot00000000000000#!/bin/bash ##################################################### # Date: 2011-6-7 # Description: cp upgrade file # support redhat5.3_32,suse11 # History: # Date: 2011-6-24 # 1޸redhatϵͳΪredhat$_centos$ # 2ϵͳ ################################################### OS_VERSION=`uname -r` OS_RELEASE="" MOUNT_DIR=`dirname $0` tar_file="" UPGRADE_LOG=/etc/.tools_upgrade/pv_upgrade.log DEST_DIR=/tmp/uvptools_temp PATH=$PATH:/bin:/sbin:/usr/sbin:/usr/bin BINARY='/usr/bin/uvp-monitor' TMP_FILE=/tmp/pv_temp_file ####add for optimize upgrade SYS_TYPE="" ####added end v_min_size=40960 ALIAS_CP=0 ALIAS_MV=0 
ALIAS_RM=0 PIDFILE=$(basename "$BINARY") if [ -f /etc/redhat-release -o -n "$(grep -i 'GreatTurbo' /etc/issue)" ] then PIDFILE="/var/lock/subsys/$PIDFILE" elif [ -f /etc/SuSE-release ] then PIDFILE="/var/run/$PIDFILE" elif [ -f /etc/debian_version ] then PIDFILE="/var/run/$PIDFILE" else if [ -d /var/run -a -w /var/run ] then PIDFILE="/var/run/$PIDFILE" fi fi cpu_arch_width="" CPU_ARCH="$(uname -m)" case "$CPU_ARCH" in i[3456789]86|x86) cpu_arch_width="32" ;; x86_64|amd64) cpu_arch_width="64" ;; *) echo "$(date +%x-%X) Can not determine processor type" >> ${UPGRADE_LOG} exit 1 esac # check upgrade package function check_exit() { tar_file=uvp-tools-*.tar.bz2 if [ ! -f ${MOUNT_DIR}/${tar_file} ]; then echo "$(date +%x-%X) uvp-tools.tar.bz2 no exist" >> ${UPGRADE_LOG} exit 1 fi } # remove -i function modify_alias() { alias | grep "cp -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias cp='cp' ALIAS_CP=1 fi alias | grep "mv -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias mv='mv' ALIAS_MV=1 fi alias | grep "rm -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias rm='rm' ALIAS_RM=1 fi } # restore alias function restore_alias() { if [ $ALIAS_CP -eq 1 ];then alias cp='cp -i' fi if [ $ALIAS_MV -eq 1 ];then alias mv='mv -i' fi if [ $ALIAS_RM -eq 1 ];then alias rm='rm -i' fi } ####added for check linux distribution to obtain SYS_TYPE function check_distribution() { local issue ###determine linux distribution issue='/etc/issue' if [ -e '/etc/debian_version' -o -n "$(grep -i 'debian' $issue)" ] then SYS_TYPE='debian' elif [ -e '/etc/redhat-release' -o -n "$(grep -i 'red *hat' $issue)" -o -n "$(grep -i 'GreatTurbo' $issue)" ] then SYS_TYPE='redhat' elif [ -e '/etc/SuSE-release' -o -n "$(grep -i 'suse\|s\.u\.s\.e' $issue)" ] then SYS_TYPE='suse' elif [ -e '/etc/gentoo-release' ] then SYS_TYPE='gentoo' else echo "$(date +%x-%X) cannot determine linux distribution." >> ${UPGRADE_LOG} exit 1 fi } ###added end function main() { modify_alias check_exit check_distribution ###get install package full name such as uvp-tools-linux-1.3.0.13-164.tar.bz2 tar_file_basename=`basename ${MOUNT_DIR}/${tar_file}` echo "cpfile:$(date +%x-%X) tar_file_basename ${tar_file_basename}" >> ${UPGRADE_LOG} ###get uvp-tools file name such as uvp-tools-linux-1.3.0.13-164 tar_uvpversion=$(echo ${tar_file_basename} | sed "s/.tar.bz2//g") echo "cpfile:$(date +%x-%X) tar_uvpversion ${tar_uvpversion}" >> ${UPGRADE_LOG} ###get xvf_modules_path such as /mnt/pvmount/lib/modules/xvf-2.6.18-128.el5-i686-RHEL5 xvf_modules_path=$(. ${MOUNT_DIR}/get_uvp_kernel_modules "$SYS_TYPE" "$(uname -r)" "$(uname -m)") echo "cpfile:$(date +%x-%X) xvf_modules_path ${xvf_modules_path}" >> ${UPGRADE_LOG} ###get xvf_modules_path basename such as xvf-2.6.18-128.el5-i686-RHEL5 xvf_modules=`basename ${xvf_modules_path}` echo "cpfile:$(date +%x-%X) xvf_modules ${xvf_modules}" >> ${UPGRADE_LOG} echo "$(date +%x-%X) start run cpfile.sh." >> ${UPGRADE_LOG} v_avail=`df / | grep "[0-9]%" | sed "s/%.*//" | awk '{print $(NF-1)}'` if [ "${v_avail}" -lt "${v_min_size}" ];then echo "$(date +%x-%X) The disk space is not enough to install." 
>> ${UPGRADE_LOG} exit 1 fi chattr -i ${DEST_DIR} 2>/dev/null find ${DEST_DIR}/pvdriver -type f | xargs chattr -i 2>/dev/null rm -rf ${DEST_DIR}/pvdriver 2>/dev/null rm -rf ${DEST_DIR}/uvp-tools-* 2>/dev/null mkdir -p ${DEST_DIR} 2>/dev/null ####modified for optimization : tar the system needed file tar -jxf ${MOUNT_DIR}/${tar_file_basename} -C ${DEST_DIR} ./${tar_uvpversion}/bin ./${tar_uvpversion}/config ./${tar_uvpversion}/install ./${tar_uvpversion}/usr ./${tar_uvpversion}/etc ./${tar_uvpversion}/lib/modules/$xvf_modules 1>/dev/null 2>&1 ####modified end if [ $? -ne 0 ];then echo "$(date +%x-%X) tar -jxf files failed!" >> ${UPGRADE_LOG} restore_alias exit 1 fi mv ${DEST_DIR}/uvp-tools-* ${DEST_DIR}/pvdriver 1>/dev/null 2>&1 if [ $? -ne 0 ];then echo "$(date +%x-%X) mv ${DEST_DIR}/uvp-tools-* failed!" >> ${UPGRADE_LOG} restore_alias exit 1 fi cp -f ${MOUNT_DIR}/upg.sh ${DEST_DIR}/pvdriver 1>/dev/null 2>&1 if [ $? -ne 0 ];then echo "$(date +%x-%X) mv ${DEST_DIR}/uvp-tools-* failed!" >> ${UPGRADE_LOG} restore_alias exit 1 fi dos2unix /tmp/uvptools_temp/pvdriver/upg.sh 1> /dev/null 2>&1 chmod +x /tmp/uvptools_temp/pvdriver/upg.sh 2> /dev/null chattr +i ${DEST_DIR} 2>/dev/null find ${DEST_DIR}/pvdriver -type f | xargs chattr +i 2>/dev/null if [ "$?" != "0" ] then touch ${TMP_FILE} 2>/dev/null if [ $? -ne 0 ];then echo "$(date +%x-%X) touch ${TMP_FILE} failed!" >> ${UPGRADE_LOG} restore_alias rm -f "${TMP_FILE}" 2>/dev/null exit 1 fi echo "$(date +%x-%X) copy uvp-monitor begin" >> $UPGRADE_LOG cp -f ${DEST_DIR}/pvdriver/usr/bin${cpu_arch_width}/uvp-monitor ${BINARY} 1>/dev/null 2>&1 if [ $? -ne 0 ];then echo "$(date +%x-%X) copy uvp-monitor failed!" >> ${UPGRADE_LOG} restore_alias rm -f "${TMP_FILE}" 2>/dev/null exit 1 else echo "$(date +%x-%X) copy uvp-monitor ok!" >> $UPGRADE_LOG fi old_ppid=`ps -ef |grep ${BINARY} |grep -v grep | awk '{print $2}' | sort -nu | head -1` echo "$(date +%x-%X) old_ppid is ${old_ppid}, restart uvp-monitor begin" >> ${UPGRADE_LOG} #/etc/init.d/uvp-monitor restart kill -15 ${old_ppid} rm -f "${PIDFILE}" 2>/dev/null pid_num=`ps -ef |grep ${BINARY} |grep -v grep | awk '{print $2}' | sort -nu | wc -l` if [ "${pid_num}" != "0" ];then echo "$(date +%x-%X) kill -15 failed, try again." 
>> ${UPGRADE_LOG} ppid=`ps -ef |grep ${BINARY} |grep -v grep | awk '{print $2}' | sort -nu | head -1` kill -9 ${ppid} rm -f "${PIDFILE}" 2>/dev/null fi sleep 1 #${BINARY} #exec uvp-monitor now /etc/init.d/uvp-monitor restart #service uvp-monitor restart new_ppid=`ps -ef |grep ${BINARY} |grep -v grep | awk '{print $2}' | sort -nu | head -1` pid_num=`ps -ef |grep ${BINARY} |grep -v grep | awk '{print $2}' | sort -nu | wc -l` if [ -z "${new_ppid}" ] || [ "${old_ppid}" = "${new_ppid}" ] || [ "${pid_num}" = "0" ];then echo "$(date +%x-%X) new_ppid is ${new_ppid}, restart uvp-monitor failed" >> ${UPGRADE_LOG} restore_alias rm -f "${TMP_FILE}" 2>/dev/null exit 1 else echo $new_ppid > $PIDFILE echo "$(date +%x-%X) new_ppid is ${new_ppid}, restart uvp-monitor ok" >> ${UPGRADE_LOG} fi fi echo "$(date +%x-%X) copy file successful" >> $UPGRADE_LOG restore_alias } main
[docs/ directory follows: Bug List of Linux Distributions.xlsx, Linux发行版缺陷列表.xlsx, UVP Tools Components and Functions Matrix.xlsx -- binary .xlsx payloads (zip data with an embedded JPEG) omitted, not representable as text]
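For reference, the way the bin/ helper scripts above fit together can be sketched as follows. This is an illustrative fragment only, not part of the shipped tools: the unpack location and the kernel release are assumptions (the real flow is driven by cpfile.sh, upg.sh and the install script); it shows how offline_get_uvp_kernel_modules resolves a prebuilt module directory and how offline_copy_uvp_module then places the .ko files under updates/pvdriver.

#!/bin/bash
# Illustrative sketch only.
INSTALLER_DIR=/tmp/uvptools_temp/pvdriver      # assumed unpack dir (the path cpfile.sh uses)
SYS_TYPE=redhat                                # as detected by get_linux_dist()
KERN_RELEASE=$(uname -r)                       # e.g. 2.6.32-279.el6.x86_64 on a RHEL 6.3 guest
CPU_ARCH=$(uname -m)

# Prints the prebuilt directory name, e.g. xvf-2.6.32-279.el6.x86_64-x86_64-RHEL6
XVF_DIR=$("${INSTALLER_DIR}/bin/offline/offline_get_uvp_kernel_modules" \
          "${SYS_TYPE}" "${KERN_RELEASE}" "${CPU_ARCH}")

SRC="${INSTALLER_DIR}/lib/modules/${XVF_DIR}"
DST="/lib/modules/${KERN_RELEASE}/updates/pvdriver"
if [ -d "${SRC}" ]; then
    mkdir -p "${DST}"
    cp -rf "${SRC}"/* "${DST}"
    find "${DST}" -type f -name "*.ko"         # list what was installed
else
    echo "No prebuilt modules for ${KERN_RELEASE}" >&2
    exit 1
fi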
FS"(hfStbSeklRͨajNp(o"[mv nAaV2D|`ƛ_L S)h8DG_7toO%LBe%n{]0mLڢqUo\&W=26RY{Ggugⷙ4NC{!RRosfkfbF6HKCļ;1'p|}*XWNLC)ZӏE6 G)b;脘!WsuA拢$Xsӱe^ԮCrG$PR-*uJy&'O>;ճE4ߍk_aC&m՚9ۓR 񦋄VtɕF$h7 x=A]FgA 3Ơo( r 8¼G;SI& V*-dGDN(M& {qmS:6D" {1={M=$V@a3F Bq֧ {~}E;UV!p{!"^Mej`xvMH:4N Nk)T O&S2T0=\GGG+8dZc[J[Eǀ(@!쨔xρ:%&# dže5{F&Ba &;6MjoJqJ@Q j3y,ע`C Gb6#μݭanJbqZmYCEl)"s!)6Ӊm_=i9n54]_*%.K/~aω0GGnlgFV%Alpd)O*4(t=_s,oCz gv8 27̲0K2؅t!^s̟FL=2 poku靦uIN8Y-76 ޿c ej(AFbp_zձL@|M^1 !1tXwr tjHݞEkN5sOwK7s)(T ꍹ< D 6mN긪ysEzQvB }*r\m;R9 zP@} TŏڎCEZB+aGmQY}M!gND M=ݤb=㋃8}8FqC;aEken8^\7c?v`ڥZ_5]kkO^}a֘ʓG~$ia9R!P(#Vz jRaaM>itk D2+k:L2O]--gdM쟂,]VW~0=SWI eERf3p3^8 wL/sJI<ƶksz~bMDnڮ33E]ˋ(}+d9qfiDqyځi_`&qY²Ƞ9$klhIBD1fJio1sJyyo [ɒü.q+*.dbdJk(.S~^.NCq^W?lTI)7aå&K Y;vx~&ԠS͘A'xcΧ14 xȁѷy< %/wyo|Pe Y }g/YPs;Hqʛz艕 r2L%ҷ{'70b];"E(ͪ=xgev 37I8N#;Yji'p,H88Hc?>mas>Gho/l6Oa~L 8>d>׮=~9M/ f@Pp'=1GPOF =Y`(5!QUwC.iMt~5]햠MyFѹ;QWʘڸh G"6>`4mp>!>מޔEZĮa&E_LR>EYOiZ/h9Ԗd7%˔٤;)A# Õu՘L[ΩZA743,ר )Rr6'ĺ G>s|LozV,dq憙inq%) Q0R\׼jʭĴ nyW9sdZo?Vuf{ M6PU\wsP"4 JAx S) "xXbjt=0ʩ&5>Ow9.+nzs}'F ̀{QuBfj^L0B+ CNKW2MsT4ըT-/je5"B0vا[RP-O!^qRֳMb>n)=|hEJ| 7ďd47ir" fo2nIJF:ǥz-*ۿFY%Ҭ@e+ws/:?KA_vt9YP&B厅dR5¥_* |pt]ЎP5)>|TD^pR:~sB u1QvI~{xI%Az0Y(#f+n'S*)HŅ(konIFKi2#J3v|/Mi:e>3x>ͥۅ =ٺcص~j} >_L )qt a bI% 5l-:hơ3kz(GLcx<۩7K)oN!C sDao庞\P"<,!Q1u޶۔ H~w'S8*iw.@Œ[ڙҬg~|gɚ3-|6!n} Uف_-Jf!<$vFn(A[ ;Z A9:ּMRmqC7Ji8 }/beI σ*Iܶ)}IT2w-RC*=XTӿhC^Z~Vp"T,z-NWewbV8\ !7'̜ ,v:~`zowMߎAOiX<9G<$= %-q;mka@OZ(#xIK~jMH#aD}v؋3{ۢM YSEOT~gqdaL\?خj)4BG.}7(-wK=R| CfVuaFZ^Zǯɚ_!|+)_oH3ŝy8diQOń/p;KJn] ?oSomk#a׭KlkC.oŊ4+hb[̦@WXz|KXT4U1=R}iZfL?8}^xl%ivbZvjgrOsDw|K_)YzI0sN0;Ǩ2ckYv| )Ȅ%:@+m?sg!="PkhSRRh==vXOL-/,rELuBQ,ЪNyx$D7xlQL4P5;32)LT.rz q+>58HKK$S') TswWmzG^%9z폹@!8n6/|רJSk\-/k\`[h`p:oSj9?K r_d!c4zc*fץs^jn{Ag&3qgtqѤ BmsxB4#n.(j5g2Sݠ3jMlT>/ru€ nV.)„)Ƒ(B`z^(Xl?M 2kFS>$Hq_.<3 yŧNᎰ0rWF 9uݭWodYmӞovPyvgL;ힽ=)aCj2OŌ$f@&gY+{ÝbŘnyo{Hab+Tem0 z_Yh1?r")sGTe~Gqe4pYWs+@K/T[e$%#WMSY,KkǏ2ه3ATmq,C9r. 
UUUfHQi$ 7{/=w*E8i'[9NQk2tz%P!*6&mYlQYšfisAu6U%0 i~飉kbʻ##{gѺc+!C-@P;.$ +R^ڳ{[SXj)oDFn]/ղ%&A;gD1s%a.bJu-rֲWty;`?<7=UMPÈDdK;M6,׳38tweY^Di{̲O?L6!˒63yϸ.^a%Aq3 5xBι:^6YTmjeا $g{ aww׻uWD0^P "UP˹mZܴ`j6I2㵼 gs!c;\kU[+`ilggOYfeVjw&Ğĩ\hύw?ǿo\|#ǛnyأE y,I{~ EM$+0Zml¾T&u;)*U{FJ/\z\| bGAYEgF.IC;dc2 G@:<cmʸUww]knt'*3r+;1 -= ҥ'WRI$NJԖq9> *ncQC)pF] _uLF`5o$6 [K xw%ϣ"F[76ACqgN CΝ|/ۛ " q6/ S2+)D[k.)J,,PKG5PK% Jxl/workbook.xmlVn@fqK'NSiC+Bk{/Z$p 8>s04"l|o_L1#RQdY <17}d(y$@D\țP Pu3Md՞D) ʉ2IpRBe)Uzl$9Q>%\WF$aXzLA?\U8`09X~-G7y6J0SMUD"Œ!#ƚ]U_ydBh n`ظd x,$("i/PMM'E.q5屘Jtk vVwL$ "Q,PKTtRZɌB̵ʚ_ "eJQ NbQ6{3hQ8'S_7uOS%Yh"9f$!:l{PnʛbPN]KF̫6Ii"T@,ΈHUc /ߟ6Vx;v\sg&YKz1ӭ߲|6z3יqߵD M~& Qvs]rlgwp:vrήZkvp]eZmmU p|"Ix ho$rl<-IR R>$Lc =˲N.TC% ЧNit۴#yUhc&QwfmiϨ¯?@0:ZD uw ͬPK3CPK% J xl/styles.xml\͎6)teYW*lY.z (P@K,,E s꽗=-6mEI?L:6fșO#RIӾ> "vw/]H@Î9E;eSߎ dӎ9b,ʲRcސbOih HEYZe fq?f1fr]!H| 1qsע5m ιM){x[ "Ԡcl˼f㟿ׯ{mp T6gF.i?#}<\ƼڅuHc(t kV,*M&u jDw3R(>O}vi]ó9o!ajO=.pTOMxDpuJ>OqvW)١4;I}"d)] &+W( q P ϲP&1"4M!Vb"_|R`p  DA #d.?>~Ps s(]s-ɰTmɓ/O3EsyzF_L~) Ssu!/诚F5fm9og1ldK:y!vjJKf'VIY'*dg>G꿎diLK]+)̞,gi58 NklOإ-0mgɖs6 |T6֨8yTH>U?}vYlgt/+5V9s+vo3%?|v1skFkfNjbZeqXgg8e$c0"93Ύfj?.LVLv05btb\Tbc~9S;tV"'0Ni8 e-ɺFGT4j#]!Wk\=6Wq}\meaOQEXhgSZiZJPhԊDz$?4 T *^z)zPͣCէ1i} h7Lҏ# rf!3AݐllJyԦQ"0aBR&t eqKCB)1.@p-$QB:Ν2 }E#Q3d&§B)DŽZwRf zq;hW[&\0az~]?̞Nv3MFcv1Ji5kuVMy8{|sH'o}E /PKG1PK% J"xl/externalLinks/externalLink1.xmlMKA}e9ڋP]023IB#:F}foѪ[)y2h9̸Mb(01iv7f@9`aVS3+P`Fi}- \^BnXEdovc޿TA\U6哋$z/V&Γz3R=˘,զ61x.W_e&_ּ̏"i(XR:ACe$+s8i(Qs0Z{jL12iܙ ݾ/V<&z>#mG~˓WK4k-hXk#F=^D?HB +@U&%Zʨ T*.',RZK%Âk &"sħ@)"4w= cH)%tb`bsЯ|s|J=1ϥQ$0|VG)eCCcB2Sw}Gm'g곕%&-U.\MRٝQ%JRMwBph8Ѻl(ʺH:+h>PKhlPK% Jxl/worksheets/sheet2.xml]ێH}߯4YbxqhI`ܳ/IKJԒ,_'H2yH=b7.:< FɌ^0`j_bga_c}ǗcQv32p滺>=_.tgjQ>ɋ1mnYJg/=c?b}j7EhHiqMW|юr-Rdv*Kwqj?)F/_,/EVf?[4{꽞U}:Ug=?>{?Z:ZO/>D1_,d'onyThp<~:ԍ4^Qv?cClf=goE6DΏ6?O-/OhI}s5=SDŋ,ٛο;&͇k֘ζ.mC,f>+̽>l#/o|A,U _T.IT?Y8혏'r~Ϫpްw6cj#ˈ:C`!T1:K8gR_`H/ \4!1oӿ B)<ٝiFa{uEY|mG>XTy8nM|q9?:f&Qz~.>4//5D qyu>FbDK9¥BEdRScdzcwũDq0WaǸv\.\wө\:%Sf0,6 n-c:A.Dã1ã1iFlM0+0<8lDva\ݙNߥOB>p'DKσ3ҕ\Dw3p'DDv3S!MgBoyx" Ot<<:1:Mpt2V~5° W{,@)?x.j|Yb.'RXo ]WSY Rd$8S A(']/YbvL ߛ \IxkxsNH.-57X! 0oXc_(4{VkPI;&&I0oXC7,zՙحfըU 0ӇQND9BbidLd 1ŅB(HQ|֭ ̀3Ԙkiqc])$z rEiӀ3˜kj컉btK/0_ŋ[:c {a#(1ŕB1+1r#Vby $vt3lL P+dYccŘBaed m ;!/'* Ǹ\uLY -(gCa3![܎ 2rkUiQH92$x#S@q@}jnmo` I 0;%&₩%tUF!m@RcN 5G'1@7:2i#[x>H92cjljUtKH9 3 bAvLMGFh1ķ@͉YLe$ch14 [ԎJHN@uUow[&9F[:Ac$Ӷ@E đt:f0k $%b[+zL9Kܫt-tYk$o`_@IEN@񍁕jL򾥵H DW1ScnJ5&yZm$ 1C`؎)/ ƸZi Ƥᡁ6NFEE vƤm ux156Mj5J5&zZm$ 1ɗPԚѭIzo( c/ PGl0_17b[3fLQ19@}ؕL:#"` Rd.Ж wLMo Pd|_ze"% ŋ F/ PRd$fݒW PHyF[.(? | Ę(Sq D X$ܧB >'C Q>1Q6Q Ut#nr2,F; e<>V23H"|o\j([dki-Pe2TX 2L8vO$jWE6O*پ:G\㢴.ieIy3 L(ZC4n[lK{[}zfT}}`I>һBS&qr6Ge<'442tgLnL2IiYu|MѿBVu~5e(?T6H"k^hLE6τQF^*2KއIr/Ui(؏HC`tm8V"ԁ&fYj<6uHv7DbEX2|ψ<ҋ|Ȅ"L(n͈wq0tc:z&'<\=0΍M*/DfRy6أTo~:xB:40OPGYQCwħjL?}{w0rEeN3*e3|Q\IePyFqq3GV~֣2 KJP"r*cgY@g.D=.B*)~KON 4!m䔻R:f!lnV(z(ѽ֖jDw2,߃|K[y}]]褨{kygr+Gwo1tWud٫_^(!Pl|?~wPKlZePK% Jxl/worksheets/sheet1.xmlY[F~*Z[#ɶnSdC6io4h橥iPHB y ou\$Y.:sf}s9+@["8e&H}k}G #8B#%[L&s>4dBp"2$=Y; m0.GWO"D@'s?NPDAPDa A-3tX[3dv=EhMG]>:I ߵdOEl&0dvUXeb9is) >ш dZb))@{ܶ+Qdy<1-3?ؖcc^r엁ʑ5zj*nV`Z50nq,. 
57JN ]aE풢Eh(*=b nM]WҫbNNOp vGUCquZ}].ԞB)q22Z2 3f闈Ca„ =oh|V@r@Nt}J|e_x6#PR2#[e1˟hYZo@=RP@= N%Ju]$3)73\a[q^.㵷˼f)zԦ> ˴] -dzFkܺ|ܼ2r8k_dTGjd{EhUIfNţ˴M͓W/UNS_Aͫ.9F|ekV:W4vY;MH[IdN-:Y_}t ]cPRZ-"*vW\*T=p*{gv콁۫;שnVU0xgx?%ksy|] `G.("O mYgQ5Z=²Gc^/ǘUX0EAwpNkSh1ЋXaȑsRN}P^ͽeOY2OKo }̫Ƈ=$O[s2lkUcB )ݝȻ3i^< ܟulX LpF/<:eQ`)3zA0bϓe| W9 { L/_޻fi[o]}G}᧬]wMaϙ_:/D7Ք߰]=PKqd^PK% J[Content_Types].xml_O0K_VOFIgS kIoAt ,,liwdtd eCFH,1# o-`j5S03t0N1nI-[AG 2_IɌ9;fI]CdY[K|GZ{RljoB3g,RBoTK;b!9 -N&Ap<0.&&t ]8?$n~W}SE58dīwa:`KNp}GZL1O5@#ziV?I]~{+Nñ?.z}h4mt{ [/巏AN PK@>hPK% J4 _rels/.relsPK% Jȓ( 'docProps/custom.xmlPK% JTQ{docProps/app.xmlPK% Jf"z/docProps/core.xmlPK% JxLxl/_rels/workbook.xml.relsPK% JG5 xl/sharedStrings.xmlPK% J3C>xl/workbook.xmlPK% J'$&C Axl/styles.xmlPK% JG1-Gxl/externalLinks/_rels/externalLink1.xml.relsPK% Jg"@Ixl/externalLinks/externalLink1.xmlPK% Jhl#Kxl/worksheets/_rels/sheet2.xml.relsPK% JlZePxl/worksheets/sheet2.xmlPK% Jqd^axl/worksheets/sheet1.xmlPK% J@>h]h[Content_Types].xmlPKjUVP-Tools-2.2.0.316/docs/Linux发行版缺陷列表.xlsx000066400000000000000000001016261314037446600261150ustar00rootroot00000000000000PK J _rels/.relsN0 {*5@LH!4$năD Îq~bLn,0&K^UY&c}qy/6͢~8GRoC*rOJAʤ{tJ MKcd=@r]Uw2dYqgVMmk5nIz>3W"!vJL|8 e ye} 0HM!ӷ!阘rpbͼ0gt{M#}HL3_JZPK4PK JdocProps/custom.xmlkH0Lٍ"DAt[/  m}Qd3{)rKIRE= Dh4UX+69=rR/Jb'|'m5 Qü,q<<'4ӻf9jV\fL\׷YЉ2[4Ca'>aozy_%mdKuLJoU 3a 4/4;Đ;A#9X\֊_7X+2&~oyj$>4K!q|zO&O&c?>l? Gp&qIGho-B7'*̹1{.-3pF@ ޘǞx@zX6 Ayu#NjjQRy9z2V'H~n^ೃhrjҔX-U̡.&Kivm}y2==-/ҊR('h͏j{\ #Kl\JۙWQzLU 3+\JY(~\ىyns^Ub9,a,\w$>i!x$[/+MjG_~1YK@O;šI3}91`VBOb'"$L\Z]rtSL6fBmweW:'r9 S;MJU*mrZ]q&0[wU\yx7V,u5mEh%Ɗf7)aU)imllWHx3rmrXgBJtiܻà* ?Nx_ya]]{2׻M4G|G"?ncp7>{6sݸ X(nǴQT?zn{Ű4C74~> PKPW |( PK JdocProps/app.xmlOO0 |*ڵM)P)B& nUH-(@v~lALڻV5)Icx-;R$N  {>@D zE KJ܂ʶeP?MZ‹)Pe8?qk}s8l@0?)R`wjǪYW׎}q10~@X<ڨa96 PKd){PK JdocProps/core.xml}RN0OpHm􁕤E=EHgۺ$e/'mBxፆ< `,Th(R1z *Y F;hEB3Qx4$)˄90b9-P&Χf5|MHxǥaGtLEcW& R!(:0=bt; '5٨V6ft*S5j(U*(0a;Hoj3Q&ː2a[/ qa[E)mRFj?43+s*Iq&f8!酴?%W XWT eyIWUmmz_pN p PKsfN{PK Jxl/_rels/workbook.xml.relsJ0!iHو0Jm&3ooDqZ*ܟNq;`I^d9go$=qWbeĉOk-biCED3d{34B}3j۶H^#hJAQzF:0)=i)eb8f!l0KA2N iČQYEG=">! 
O4 Ob5`<^@h"9"j]wՕ[!Ds-{) Tsͳ!P»,yi+HN|#:[:O1Ll[J $1>i FdA)m$Ed/UT̬Zhś*v"\Ġi4 8"Vmyye_?/1{R 1 L (A>ĠpƹQ8AD9KnKVb v$,F,+3HL-}>+͕F;{n^Jֽ%!Deї1ďd9z}i.o78jS|%Z9FL1zVӫy=^sZmX !_+Q#~!>LC=G.tPK{rPK J xl/styles.xml\ϒ4Yq!Ii Áv;pPb%IOܹpx:[ Ykw4S~,e>Y<ͫA<'.Hu7tk>po`?2Ƈ/2#h/ ?3hH1, 3BFRN6>U0 X ne&Mr97knB\NK!n (f}kqy#5.]ZdsQo6ҧN|aAe?Uh[B  #uc%WtdJJIMUI+(||*^z1>ӣR6˲#)G>I۝29U<ΐµ=(ޡF |ݒjL8x̓Ѓ1O0 =x0#=]0) -~ Y=Pڣ`cv7)߾m;U Fǂٶc2QFO\.ԤxU885>\0"o)Ό aKZGEY(O%'W5U$R~hVt*h*_Jn,g|;Rf%pCk _%HDJ.@) 0' "%Sy8/(apUlAZP[qh1yRF ӜT\[€DRqc&1n%Ee 3O毡\"W-4֋R\'s_URi\nKFAfՒiɴdZ2-}&=){fŦ$6Ι黜ξbzЁ/mRl/Z^Vm5!1ޫ t̓uܺ>Rn}rc{(T31٩FIV.H<)/;1h^l,,gtjs_ЬYl-4k1KY˘Z4:~=b'Ô19*bhixVekvcMo%۽_Zv~f5k٭fgg;-7ճ[l`F~#6f+39EXhgSZiZJPhԊDz$?4 T *^z)zPͣCէ1i} h7Lҏ# rf!3AݐllJyԦQ"0aBR&t eqKCB)1.@p-$QB:Ν2 }E#Q3d&§B)DŽZwRf zq;hW[&\0az~]?̞Nv3MFcv1Ji5kuVMy8{|sH'o}E /PKG1PK J"xl/externalLinks/externalLink1.xmlMKA}e9ڋP]023IB#:F}foѪ[)y2h9̸Mb(01iv7f@9`aVS3+P`Fi}- \^BnXEdovc޿TA\U6哋$z/V&Γz3R=˘,զ61x.W_e&_ּ̏"i(YǪשlxnnvjMZ`pU ga>ރ}|nVivWTKի憍n'ݲu4JҐT 8[-~kR=>{u$~o{|WibܺZ<'4[!QY)I^$e >n ޥnEaWyfiw|9'٩h %!ycEE&F0ms$ٙaKwn 臔kO狯(k4,ATY+#t$_"DAhL<=% Cݦ?Dho-hiw[J~wJ^- (U]_w<v*7Vћ,"Jo/->b(t8 (4x;]>'j'R\妺fچbG VyQ$ D%(L.y ';$=URdּ#z BJ :RX 4^In_}kQ9& :"K5OAD\JZBNKREv^cdZ3Iz',+w^ PK5wPK Jxl/worksheets/sheet2.xml]ˎHv+ 0OݙhIjS=P(̬yaƋYswG7Y Vn+%dD$oNMQnͼŇrmL5 麪e6gEZDiK{~zl9mYI*Ovf_)6zs(w7lL|{i!5Yj8.N5[f~m,vk;*$N~ݻ$Z絾}zfM9h9%]NM?jQZ'7}4GwRwRSsQꓗV7|ؤEs?}`4=zY;hUMM-頋m~|s:h]lӡn|xn揵'@.d aƺ#{yxny̿:!^} 4F:-rqQ;)T,:¥P48b@D-:A;a:`t^KrIBj!44&DjKF6K'> MHCahh0=]:,ݻOԠ4:F'ERZ>Mҩ Gmit4:FҩitvtOt9R8D|˧>D'jpO@17~>%E|'DrMVClFG \2o^gc$K(DO2e H ЧD "Ǡ2ܻާʐ~ A4Vɚ gbb_]!1$Fb@!(":SZAK}*RJV J @PZ $ӔVR/CIyuRfKgyHfR%JBKAPCi h'F !#& ь sFVRoJ0zR-ͫ:X'j  j!-e0BπbTN1QbaBU/UlHWIov}bbTW1 Z@0ϙ ʙͲ/XtmNRp} @k"P|HlbTn@\w ,*Q("C-.CK#_Y;-S@7ppngq[/P]Kq@3t@@1:j0/F{YმTr nJ.ÁbչNrÁb.`}۩;V K%X;&QĸBO}zQUZ*ú^j0b@(hˡFA%G\fl8#eb^`QΈqebV>j.L"E#V,&4Z)b߀0Xʱ.'EBJmu&2餶"KGm ]>Ǥ#lC@-d]йX s[]jjZ1kR=&rj10)(T)@"\ 1+!dH9UuZ "p[}emCrL>Ԃ=0:/RcN@ȩ8Վ3g b  6`4[Zh=R eR-F%lzbdR%&+33BA L`;& twYQeXZ:;3;3c^0xMgP)Ubi,wg0nct}FqÂl ]^H 8-YJ 8-Ya3!-@fh!E6NEF$N+ch-NX!G ]/PdrP,Pdt`!>1K 1+rEiԽ>n$8IF7]$N c-NEVdТ3"@!I6J";U [K+BZlE8EF #N7і h-NVE'2J]nL IQԮLQI@Nb;EE%d H2Zr;ɘAQFwl I$PTHn`Qt5Hu˞ f5 \>B!Uy(2eآ-ReRe,IS7rQ"eL ɲq2ZSr0WR,҉z_}LQ(SH#9e4 2hzy@qEj歧?LaP>Y?fc*~$ݔmb&]G,wsS~X|{Lb2 gjFSͶyfPٙMu:1ww ўhq#L ? 2^ĥ&M_y͗}lܴ@}ʋh3Pˮe8 2EM1ŢN3_nBxTFZ2`s, @r%AڗTkHǨ8R^4̺\VfWEګoۣ_߮o3kŚJ 2,3/u'1#y@ [^}Wߒa?_pi]'MUϳϹu>v/du1L0MBe3L2̏򕌳PJ/K49ʛ۟Z,(ޚW/r,:ƏX\KieF,*!qE ᖦa*)|(Ti*"L[_Tgo>} ?ycg\?F'{Ukl24Ҿ:/0؎W RSIbFg~ ZNɽt:v0-66O>Ա*~Ȋ|oI7YʔP|/q%" /<0 :37"ő cJ;y~r_-dQ,Yu`6*{4H}CX/Ɯsxe{ņQ, e&TRЎV}y m}m1B, J0T6_ 4cȈSY>}мaf,&z%=TgviZUnc$kxۼ86ZW3U~h> Uo$O//x8Lg6M_?>=iz|+ gsۘzy)PK7K?dPK Jxl/worksheets/sheet1.xmlY[F}*Z[%9Zm l& MkmYlZJ/ !P(]E呬ԻowΙG+f8 QWAKSPƓzď?B1'0U #|/BH@)!INGS8J`LK|B&4ںu3?UBIh<Gp3O(t&: 72#uPc?J!-K N>6aGzs,'# p#8tWcflDD K(bBUeİ;cM罳G 5|Ԍ*H806(Y:ڤ|l^ ^{SALo f믡%|cVC2J@#іhQO_g:l` eG͊䨹Q*a˩k;Mۧ<:z4쒃`ёFC8+ũ%)U#Iq6Hq/V[!JRIZ ZvbX>D0<*YCT1eA` <U2a2F^ LEPV{F01Tku95+t*]s#mw˧PtfA4mۦ:belU^W ٦xm+/u ]^kA7inWcA|drP,vg@WsPun[nzКwM[SoMn_Ո6aěr~6Lg2ALG dZ<5M熭/X^ihLNiL*Ӑ׬vd3{CӞTn!,!y0&.pT-h;>4qįµVv?z$(a0*s߶3Y?U#DxQ'J'!.!N 1}BveYUX3q|{ RF_M& {;qpw`2`DC3.$e1S'˧lnU)ɍ_EF( YGq#L(A8Ή O┇b z(;:JW?K.|7>~#/~W'55uϾ//<:WO^|qeϖ>_)>~g?in&/ɥfzxo6PK $j{PK J[Content_Types].xml_O0K_VOFIgS kIoAt ,,liwdtd eCFH,1# o-`j5S03t0N1nI-[AG 2_IɌ9;fI]CdY[K|GZ{RljoB3g,RBoTK;b!9 -N&Ap<0.&&t ]8?$n~W}SE58dīwa:`KNp}GZL1O5@#ziV?I]~{+Nñ?.z}h4mt{ [/巏AN PK@>hPK Jxl/sharedStrings.xmliWI. 
ݺ^w#)SsWv lrwcr ޳mm̌ fS'o*03DV]U(ቈ7y:r"Ǯ|9ݟ8"+˖'dJIB4|!L9Oc+Rwb!$N9Q!?&\΄,HvYNuD]wu'1K]t"HFz';_')'ʟQ]m\4O&WBą~U#ңo䶮 g)BG$=ƓbH9rE/ /*I[GK4Th}?OC>{c%w#$(H[e+S+;ʎk J7#KZ:&\(ij8>xS.h+/ `:0i} 1fŹ?r7=Q*'곇QU̡VSI~gPdVkU7+(E#Յ">M~1d;SyS*K@sϿ6fV 5{sG.ŷp5=fG/?mk KBw;uxW⽼(VOQ*jZ+#=ꛧ'x |YZ>:ooAmRO@,oձ 80Aԙ49җŢLYz̠WRL렞 ZnzY.M%uf?Wkn_s(qB*Q_{#ت%Jh Q-00kƂdX0X2OȌ7Yޞ uYtl"FB4Bs<T&A^ɠ*sKD\Q?H'x;LKd `G_481lqG.dcm x[&yVP07V ϓ0؆zr;4Bh"a,P.}0p+s2+u))O,˝<#`0`t Kݸ;}{;t|#K? )LDT[ |,|YO}yI2A&HP,~BQC%ƣ#ɗ_5ෑ9.&!G;}`yMHy)ЂPsHbTG!]dU7/+'=;L7^lΑ{D[=kw[H"DrQ-=,6!䥠&"".˅~=kڡNm {wh9ɒy:X},HThvS1gBڅSwDBH oZx7iἡ=m.]hO{Ajv ,H̏4,ET}-Z Z^ 4]gxWm@4h#zz HcQPثbd~'fh'B+=fju.w<0_ҕ>zYLo㈮s+)$a4AB^[C5DOy=&r0 sX6Td'`_ERyuWVJpTb׮t7{UɬٱΣ̼: O蟸#'br2 `e1 ER.1с?{Dn"D GHPW+{eIrS\>/ݣv(xC-%'9d[Px A="/ J EW{!uǞ#4G0u_E `ӑ1q'NEv}`*oe{9"~gʼU*OđMxg+* b\Bs W(Y.M:QB ό-M;"m !|frqȿ'g7`n[Ǣ;DjDy7[m39Q(>S^hkN-ڗ32o䄿vo[M` fԁu D՞'Q+A4XH y) +8l/yu*ca4ZF}JKu4vbY)^ >8, XfIjˏw8 6$8ޗMdAk XցmtJ e ux=$SI=9֙/YL#w夋-|W&-8_H=>pH1C>""f5ot<6:@kb8Ot6:@B_':fleXTٹD9Y.>>!40 嗲Ow hKEGO` `VimV= =o^>BY 8VPl[f0fM =N|{w:唣罠I~Ϛb][=Otm-KUN?<.b*Ú9hfJEEo4̦ug;D߬҂[#1%\4a}ͦ /n-MWS%6a'wdA/Y9KlI&.w@;~#0rG;Q}/]. ڳ1Q2CQBmV7tu0"gG53u3A \13" &Cf$p#i"<'`j&Yla%ķ qu&W v͉n8Qm f+nՓ{-(ؤ d<2x2R1 h<s0OK{kz_@_!ȑ!T3Cc0 ]Lv7S\(ĨL3DMؓ,';D>|'ϯ<| s!AQ aiͿ޸y h4;/q7WZ-:om RuL*s[OA [ul~#V}M(Y_B3'xҚjQ4S. US'jKU42|U3?_.97%w4Q7˗KߏiFY<ۘ{DI}wNH=L [C !χ% C^'O }VA)$gyv&…uR7Q1+Z aΫ^QBo'ry!ǫ" zeTzif>@BY佫>XE{KeK_Rz6Uz:Ar}ONfs$F^Jl:tOxP] !-/u@~1ݣKF%DW:xLa6%Zy1 Ḓ*`2G6#S*&λ^?|s X<Qw\qp?y*">_*fPjh-!uz\Bn%#(ڏOJ0o: gECX-Φ_{f%ָ>q7/4k(ZnȔh~K1;[xk2e{͝`soVQ /μԆPn bvv"ނڝ'<EAgfYz|Eea\FMm}>%>v i+Qv nc-ˈT OImmf]vd{F_lLH,&{W]lf'VMN:');\DW2kŲ9'`Ps#wB)yHZVv m2i0{ŏA@O؏(&[(_.- n>jY}9jAB#۝. i0΂tOj3Ħz:X2<""Y#sxq:5O + @ulZ-VH2 Wypժ; xjߊh8:ٕc`nj`[I}2TޝIG6ֲΤn-=o'xm-B7י t'C8ǪVX  _taRHƱuszjZgf\)#%t=f6SsrlG`~I, HCHDvi)Fohv0FSS`re竓X۬нzV.#J+`pt ɇ&F$3Lsiա?L#JwBBd"vmb쐄=S g|~E~! 2JȇS%G.Z7Dʥw^4of jgjKLx:˅| *p{ܰفfv<1-$z 76z}R*7My9%yǿ\XG +r>bw=PΩSMϫ=b{l<dzZkh */jS\']|] 0(lN{r3*xS=M $$c&|+j"DlzFw9ۦfJӷ8)y(-/1+%Yy2Iȥ/ﭫn%q53kYJrvw 3Ru| Fɔ%Ew\["KH -׷[7~qN!0',q qr}OuR bPdnc;<?{W $|#I+~>쑽>q|L(Q5I?vمm  ,y3ccyfXzm, fm,ocy6ge pm ?΋ }='d`˳$M-=hۊ?Otm<ѵ-D׶]pZ?`j#D`λo*rPRFXu8>@;O S&!8y?桺3Š7BC$.`M=VF6]6 17- +6+..9/y7 hd̙ ᯠlfǠx_,XD䮜Hbr0˴* S$aVQVd;e rxo0 !^Q5fe,X׉NЯf-_':̵c f-_':ςV<8%1;xٯ 3&x,>`u2|6^'xn;ԏ*O%9K57R~7m9ʽxELzcLJsa),:ljz`@'w6 W6rcf 3qa|/̝~̼XT^Hzae T!nZgDXq‚qv(zbuݗ K{<*ɹ0}`ç'Cwjxs6t?͌ڏ;WMGJ΃FHs Sx dQa7FٴCdl_1NT.{پNsdqm.)-qm­]TNY3S\,OW!ocyh^l0O>i>ݎu3Hpyk"9 ]V|uHd>!m=V7ʅE4^GBnf3¬|l AՃrˌFt3^Cdׯ?83h6Mz% BC^L=/MVJ1lb|F(S 5?U1TǶ#=>3_{ evb~vҎhrp`H޹ .'(4a׷]?6v):\ މF))wb%)“Wh㝑&VQ1LԣAI~ga:^!+ }H,Ӄ3Sp.ϸ<9iBK1fGX>_zGZ>E}2'{8f傁h k'QSJ؜ETZTyol B?ӳ>oܻwo 6i"T)aC :{[:1f̪"*uvI22TY@7ZAS32rjvM[S:.  @DZʞn{FgBp%ߵ/b[ty;L5lN!I~$1Hi]ƚ/<̃Ӵ:֖թ   u~"1)%rRz-Ŝ& _mo+$gXŖ)c9ZՉMurq+b|<=Bl8}f1j­YBe,ohdP߂Nf)Q'a{UɬuNj?=-Nokc>; ڧYe%ԬTֱ9N`kxFž*I˽ e<fH*lMv%hKȝ{CMHHFLcP["ʿ?ogTH)DGkarZ/@^Qa\<ٖMH <՟$&Q<)X]@줖N u򏆼n CضG=z ѱ^$ZQ.,i=]HÓ jJE$8Zl#NtU; xWF" d>X`j3:(haC:fbA8mUø*@l6 /{(]xPz?'> Hgs8,a&X$Ew:( :|n %;gahi.j}Edžmeϸ|m` \C_\6QhW_OPp @G}MIm+O p7&x,`R'hxoJw7䓪18mK XjlN@_<{\y~ewz$;v,q}ZV2FopV}vbT. w(!~kkK|Sb;>[K$v&%i /Ml~f7VJ͖C|ct]25y?!mmt=_R9(2e,x,dML@)(Mg#ɮw8]YFGF`C;$! 1 -&Ot3O꧑z\ǙriL̏̚ ^2jg;CeAbA"9sI7YE}xʛ to5QWz~0IAwѼ/`sFYéD&*@#cnu ٱȁYQ){ʅ79JJ )j<Ŕx ,݆RS0|g<;?,L|O?B4QR"rWN$]h\K%$0NX"q>EN a-z 9C00vCyj{5d1л1^)=,6 pU:Zng26D/)Vf=k 3#m`%vF-U7?d2\i~2/JhZPWoZI7ن: nɗae oYzs|O\TEWLT0봽E\Wg.Vg'>[ޑI%aI}']I.cS |~%$CnNQdɧȂ_ +!׭HaIAIafc:yj1k^G廌Ilbh،Ze9xK,}iGB\!-&[*H,a 6ꭲ rx i pi<%էYrҰQL :=ѻ! 
䊬?Qמx eӤ\XOera,w,ʼ!KZFՌfՑNG4697 d3uVxI[yzE/zVXS9'8ǭfz\ۚ8N`vrna;}n> kl;Eƕ?G٥۪DbR+=1nԔ "3YǠ^=+M\8h5xmT)媣i5lUAm0)lB)""ۀ3s Zzre P8 yQP)ޠ GzpxU/:g \~4r[M58l9ޖ69&'ԤĪ788~r>KY D8)Y[br2E=R0 Էf\!.Z.. *&R$)e*P3[Ɩ_jywδ.~Y5PaB66ڋױw)#ITPuxCl -6B W$׳p)A#a4 W,cOs0盚K]ƾO=)^)ȉ#By/y 3/1yCA뗼>-{9_X ~."oH[ܲ;,yE"I C02#o:N;=;y'EӜᄬYw'['g#"&b\:)zLEe#<šjo:N>®mX,06ca&1 ei*&<8iEE#ŞE4fgd]ʧQ/KKh_&F>QxIvc]r 7pa/| 1$(T1V<|6* SDϹPE6֜Y%s7RdvSe@xP].G'|^i?M ˅:2ԌJlqoZo_7xEy~#/bP._.- P,kV4cm4Pysil $.#à=Yd ]xq yOΰ#)'CH9r[$JHS:4[S 6?*v68y3;yVmcyfXjzs*8?)5άRԽ/⫺wz<\ ^7r/R]T5Jٞ0%ٞlY)=Q=Hze ZnuY凢"o6T. e'+o %%#Z%!TpAH3AۛŸHlMRQ"UO%R\ڣz>WOmũr ~4_IzW~\#F22KlGԙ?g2 7$@ZAu0o{}ͬj{#Ǣp`@ޟp_5<~Gܡ  ?/nŠ7!_ C!QbyV*Oiw\NΆa%!luIrN*މᵱ=#l})k3E:V k79NQ\ hp ^ʅ~bp>U H,$FXOD .6fS6!Oښ1\9HR`x)IRڱmLGEv 1YP$ !N$:O/rL\Yrgfe~4.D. T3Ct=槤dC -l#У,rC~I%w@‚7x!xOa۵ Ǝ#&Չz6Ľ:;_{qP"W`GLOΔ!51K$|_/ Hc>An@3p<U)=V%#4?hZ7p̫ɣ<%mok! ɔ:d+ 1,nh40"_KKƀQ~G:[)`e6tUl ~V.BM<~wul 0X`L5tvilq Oٍ=ywۋ䢱Qi3a&ѻ #~Wv$3F:DL.DL]jwxIgŰۑf,sm<6pxB!I,ƫ|㖃x;t2y}N}傱8: m7+MNFYSYjࠀOۣp\/|JXCM=8˅t af[?љ3-daiN!%#ˠW#U{hqe6~`;T oWPf- FmIlfv:!2ߨg?Po}Sߪc<*-i+/2͙7 c@h}:NjJ^NJn<-}HoIBsQ*J项Z,?Kj>W. oK[Zz6Qc' tl]"OPߐM\"&PvBL "ߴ>%Z]X$ֆ;<j!Ws"7U M燶uh(V+\N)Ww{AoHܜ?9/MKY'fQ*fhR-"ni=R$ILZJCN2zZy2 1Mt$J"ܭB,x\y ͓L^$,FybmhЇ]Q܊' b$H^|HT| /][iP5baX{: Q k!JImLS jXc.A;W" {rx<5ff>zP9=I7Z~ ?΄IZ%c4ci⇚RM$} &sG¿xJgLUS$0ej+Xy{w$vU{X/ 'Jhd8fhBZ28gy(g<F=#E(ɿq(q4XGsLBV!0=ym΂Aq-vuȱ.g4fZ" V]&VSIxng'D kUbGwuhtQ^:qx :B=LDﶒ'"A5<}WgdE芦[ HDlG -:1y{ S55ؖdnRڒ=)On^v[85'r-3X_-1S(:*Fe!.K֙$G[IPԬ\722=$f`6t.$?jw?̜=Xj3g8rNI&>N8|CwmدȞ WmhӜypC!n/+>  _ PxꆚH[r8"_]-%Hm\Wz֊6>6֧tM-BӢd >ݗĻ|_}6S. ckhVޙG% C fmoM?Cނڝ7$/Hy $jQ'V 8l>qҜ .m$~g73P沏OsmWòٻ'lmV}_ ~{# ~A_xSS&,3LْV^V2vŊOé ڴ0o8{D>"cztoxGXT=o1řEQ~֎T"Jנ-~7Գ u\:֖(D$%-Kn||ZF2->, K ~JC!=HϦ]7iAQA(?pf"cp?Iڸi{H܃  y WAN |;]eY$,~E ʜD@ ~Q|8y/B")qZEd8jdÏE]|.lXր yʹi]U>xfp}I2W@Qta_ېXעKp'#0JYE=z;#3kLQn 벜 QҀxTw]N失mƤgfVo^@}CK9+\{NwP*!ϥ?o@o5bOISE| C}7Njd͜Ƀ+Pd8-ǯ[X`@{Ǽ`X;, /hάBPl`) C""irm(3D(yBǃWB 22f^e{CR& i5߽qt\]S\0j$ώ9rIN{9ZNlWO [jK=Xg 47$rT-P_<F g>7xZ)ڌuœ*lԏM߷H^7=n >FR>$wKTW-Mlc@~R A|FbuA|:16hmh{t_xlٴXG׫G}N@qtMugxraK)47% Ot=ߣg q$"}oR'ʸ`[O/}:WsLJtJ?:m!'P ?RٱzQ#dכ@u='"̎`&vZ_b<+jzIٔ }FtĒ:d$ʑo>k>B\y,NLbIr5%Dz:Kl❧d>Է W*(RGKDb47WyuuWĊ<z='(1\yɬnbDߢI%];ge9w%܅/H?2-b/+o(KWFH 3~=O{KNԙe)`yG(޴3 шtTK¹qEqtc͒'m"m.|;ƭC=?]Ϳʞڕ$0RǠ{X =l/-Q+6_ >f%h@[[F~Be'ȟڈȃ^%T;g`8$[0!Ck K`{ZĨ𱍡=sW|0zulrT00 ײy%ΛOF=r( )D;Sq筐^Du@2]z#ڣ!YZ$umD qh݄ofǠl8μԓ@3kTiz~ {YoJ:"u8 owx{M3z>~W W$AHf#go'\{-+jEpMJ+?<;t_:$@q\2j `Iъo!! ,>&1°p.y_ϕLPK|iILaPK J4 _rels/.relsPK JPW |( 'docProps/custom.xmlPK Jd){docProps/app.xmlPK JsfN{0docProps/core.xmlPK JxLxl/_rels/workbook.xml.relsPK J{r! xl/workbook.xmlPK J xDC xl/styles.xmlPK JG1-xl/externalLinks/_rels/externalLink1.xml.relsPK Jg"0xl/externalLinks/externalLink1.xmlPK J5w#xl/worksheets/_rels/sheet2.xml.relsPK J7K?dUxl/worksheets/sheet2.xmlPK J $j{*xl/worksheets/sheet1.xmlPK J@>h1[Content_Types].xmlPK J|iILa33xl/sharedStrings.xmlPKUVP-Tools-2.2.0.316/docs/UVP Tools Components and Functions Matrix.xlsx000066400000000000000000001571401314037446600252250ustar00rootroot00000000000000PK!#3[Content_Types].xml (Un0?C6@`Ě/p׎]JFH}hqsBB|%ʹ(PTӷQ )_+PK!rxl/_rels/workbook.xml.rels (j0C{v)e^JavAG#j"i}]a%03Lfs׊oT; bhsWԶRyz{Am[gQfLݓ*YJ v"ף Ns}%{7BNK X(iڮ,_\ա+όMվBVpŝ$ [8ߐAkJ+L0m CF{,>؇;Zg7 0Ö g19\,f PK!)xl/workbook.xmlSn0 ?xBc%N NZ`(kMBeɐ&?؎e0ic.r2)=򑚜n ݃6\Ɉ]LT:"Έx22$DdN_l[)u!4ɭ-Ǿo fU o2 fkߔXjr[v`\aCeOL%U6 Xyitq7MG+KV`[A<]B>j*6?[Jkloz>餸1OI\jv[a 7sFd8e+]D>ᚭ0-ZYOOm/TQܤ58FvGEZ3J"JhM炭- 4. 
W'`%nXϭTk2||χ|u4K߂AqZ&L$8Hw8i z[^yD> t h΂^'ag뇝x8$_\`y_ihֵ~\:pc5>3NZϚb6߿PK!'1Txl/theme/theme1.xmlYMoE#F{om'vGuرhF[x=ޝzvg53NjHHzAB\8 R+D5)EH 3މ$)wy?+"-v!|D.{H*01iy3"+soD},7p J6*2yBbx7" EP ||#VYV*bۛ1 zO/zmfܻ DJMMj!g:偠?{C K/Z^| HZB[OJ&+FZ޼7qnӭ >Xju)kgd.Tպ/_]йnTԀ~VZqd|9x|Rs (d4,u@{{sS _z5Q yvice\ +#5K Z .raIB4Q-CI~gOg?=уGEW_ߞ|gxYG4}ţ_~%ЈHt}m1d(F11u(pKXwUo0+õGk?SEK$_ #9ksQkZVÃi "n2;NYR:QsXD!O)_w/cPƴ%:tiNC#ˬfۨYEBA`V0ǍWTᨌGXeJg/RA8ꎈe47[5 4lH褌uy'GIO㰈}ON E1 / mJpnQi TXBvoD?kƌB79-ؚJbX ^6m<.vyޫ;)9Z:$)c}5c4sbԃEMgN$?4%!|M 64HpUa? 5O3 d:(vfÜɰ Hv.h1[N`ΟUV/L7VJZZͨfZ#-7bh,ބ)^^3 gHnYXL3D2#H۽ R+2r$Fw Қ_v ՗ˢWeOptl*1_*3n'LGpm^9-u̒VhVn8<Ju7ƝSdJ1g VG:>\ t<.Tȡ %!{; [:^CR/ȁok0e >O$(G*A[2wZwY,ed2LCr@@5{(T7$mw<紂r|5wO>(&bɮj y /cV= V iٿ gjmZx)Q\(KGψIc>V5hf6tCM&ʺ6ײ'\1gkN3:;\qN-S;kK] =^42&0gO|x WSI&YI`=DCPK!5_ ?xl/worksheets/sheet2.xmlI#YH|NP] U5Jvy7+Ӳ N̡ 4 2=We%Kueˈ,Wˏ^?b/n鬯⩓dl?qH'|3\lraŋG{l5]viPlk]ŭԅUMחKՎ1w|`kߚ%ǼԞ;ǞcHI3fc}\gӴܢZ] f|._]v7pspu>~ؼ.q ڇP+Jv[nbW5ۻ'{y/q7?cKvcAN[X'l2o]}2έZعd⦅3x_%Ō\?ej3HǻGt:2s}rJgN"NcT${I2'g\6C11+ϜESbk4wח[U4Y)>ӲLS-#RY2қbab/oRhCЬf=4ylf;4; ^hCs84'9̈́YJ+ {1ԱA>U}?%+N=x5RH*Rh3K7EK/E$.CP{Jix]"IXvɈYf%'}"rq%#"1'֊jG9OT?"2+ёٰ"yē%ʲp8lE)PBE U)ԤPBC 7RhJ%:RJ'RJa$&RiFv_G'N#?ur  XD` U klP ܻA` 6 e]{ 80#6c e* AbTj^fjXee({EYᓨX;ϋ,!XF`B 56(T`m;<#8@pH!0!8P2ApAZ)ZEJe6zEY+^QV$*{pE^||(T`B_e e!XGApl#ءPE`C !8P2ApAZ)ZEJe6zEY+^QV$*_z"ҍ~_Z V,"XDt8r U 2#ؠPu8M[P("C!~S(S 8EpƠp-QL-V2Ue:eOb.yUB2ߧEK XP.CBmB9Apl#ءPFE`C ڎS(S 8EpƠp-QL-V2Ueu(+|poQ-᧸H(:hrHK!@ZeT.GQsQ9jvH mCaT܅iCF: 3*3@:tƩ}H-c G~9[!0o r3Ht$2 SHgz~]-2 P,s2{j4yC(;Ba$4W"Oޭ>9]HIRJ(RJi(F)MVJG)]W@)C2VD)ӽ=ޝU 6oop:{<{@"K.BYnMb&v]b'Cb=&/8s|ww[8/[ǣ&./́9.Bo/^Lx[%32$q˜?~?:EQRv~.IJ帺Ev@nvqiRV[r+g=tMz~풬CY&0z,_ĂJ_~bt֚U/%gqu\.]Tzo T\J$h ,Wy4~`KJ% ⏣]:b "zTSԠODyjJk:?& '6. kiol]H>~Vhu ~,E:yuLY!u2穻΅ѨPgj<5H}?8?OE³ݽgF;oiEۨvmTGv4nRxJ>v0wJzKsgtk±+Bav(o 6\1x:n:„ | g20a(E3]Tzc DF u/n33Aur)y 6*TSaB \T3e0" QQac(*jCUdR#Z2eЕXz2e8Ls2 eHCt*jI|5|{"pI31)Sxa*H1p/B6"G +v&digtT== HTO1@yA YPH<%Tx  ۙjӧ!c{_ |!L*`QC c*LQ” 3*̩P!BEW?#H6;1D&刃m:t98 d$TM`T - $W,E}|'ύ٘*̂ 5 d#Z9pzUH0LY8 H fQdNY@f!P3 ,98hqAcN8qWfT!@lE@f1hk Ij~S2K-,uXt K=ja Q<, 0%B((`C&ti b Bə6F[SvY:f鄥K}DustQR]%QlqrSdnJu Jښ-]#-[qȸ.K,c(53Di, 0%%QlqUdХE7R"ə{-䆰έrϊt K=QkE -4mI5BAE["hn-MsrCXVbkgEt K=RLY:C`SRPPbE[E]EJu J&gH:ρe阥z,֍jEmKiےZ( 8+ rG!CtbJؚXjfRcNX!!J-2e Qj9KLɻ J (@.79WGk]"HmKw#m;bRK]Y:aRQ@j%)Զf hK (@ @^?h:j)#M4GS\MkDS:,`_kE^!9, ̾V:BiY>\'w"|`N{^jJ_~7_?* @:x_~^񓯿?ǯ??}O_7_f4___k:i^E]Uo.W/.:BPK!i=xl/drawings/vmlDrawing2.vmlTo0>i}1ZʚIcRM~D^r!Ml?VT$L{i(F_m7Vgng*ɶoHs iJş9+r+@ՙբ%6sqY6݋Rpc6FJo2.'c clPzz$QT $v %](FZuΛ|hIU\pVIU9f>#H^a#l|m6J9SRg+4keꆶĚ%}m6OXR,a y5%(ojP]E 1xB %=chbSj&PJ|iV,tRmpMG]B!ۍFuʊR`vF2YaSsn^bozZBX2$e xv~p"zg(,{3ޥїgq*LvFmaWRT%O;4-mduyIRe$F/>Wg׊d,n߿\i ,BSm}KhV{2oPK !''xl/media/image1.jpegJFIF,,JExifMM*bj(1r2i,,Adobe Photoshop 7.02006:04:28 13:51:01,,(&HHJFIFHH Adobe_CMAdobed            "?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%)$ILlu2h9赫?*GGqvl|~}l3 zEF 1K?k\Ry p@| R9b"|wc"?NQ.'`g,mwYcY}pk6?玣ӨiikGlk]ԜWh{`SyIp~a|wN8GڗAe?f6=coj_֋^,q{8\N{~pf#)uGH5*ӹ1~Q8|8pd_7Xʥ_!^kv2encĵ .+])0άm~Zc!#`Ou/1@lbJ㞤I%?T JDr2ᙾ#g{_?-f"9 ?K~LVG.b滯Ӌlo&N%?k`T<#,G'ðL%Iw iNK($6=Etu7k$w-汌|YĀ$~%]k][$EϣzJNrCh9OOλ'roKKOO/pㄿr_?װ1*vnh{]?d2kvh?+iz'/r~#˔g{_UZ}Y:BZy/4McFeD`Wo.e9FZK&3y'L{{la-{Hs\ F*)s Cy}r~;j-kAѶ,wk6}V V״;\՞+;fMm[Fh=1d4rO| 9s3vF$ڜ_neCu%{To=GIпSCI?-Rku7[Ŧk:^/L?9os0GXk734{KyqTTlP'^x]{:NsLP$x9ku̇`E?S_kt+=Lg)仟N5+?/Y)2kyu{>>XܼGAwޯjxDXӋ|G&#<} 7˵W^?V nDhfYe_ָ5qck>h?6Nm0c1o,s)wos00dB_2Gv5cݗR4plHn#Ui[]zeT}h&ߵ ʬ-nIf}A6;2_O3;(kO1kcXeф8-͒xʏjmʪik$k@fmc7HF6\ymKW R2gQI=y`!F(B-pB? 
~[֗Ӭ~[RJպOdgۣypo;Z=W 7X\CZ$>)iLGCg >y<{.O|Yjts(SZM`-1y {s<޳gW:׬ޝnT s h}=OԿ>@YO zG>#<8>/,OӟLe/jX saY˭wIds~Uߝomr̵ٙ\۝EC}tWX7j8v| sri\eؗ?-cs6ͼ =1XXaT۱n+w\{XUPQ2vYXoJ[YǗِn>#.GH|Mrst{|K Ԭǁ3US[],p?cMxU164!cWW֘0:a۞ͺ{j,^+tߵ2\oO~~ѹT ۩xs0Fm=ޛO(Zw9_W%><}yLuN}W΢pAT9eg@}ruZ0{}{710~uh!7 x3㏥~Oֺ}p?YNJ f1Q{p?Ã< ~aWcZeY_"^'3uԑ]pfn?o۠nsdu`sSnnS=Gb2L|@K'-H19gN?]wX-k>ψ'"oU_ӻ?­}V,7Wl!yʱD}21쾚\vX]emr}v'-#UL5S(G'} :5M&L̋N'H<q> emk}Fgmşc&EFie ?X= :6ܗ{wTc[˄y_^_?.4WީV>=Am82X罶?/I%d dFF?(RN/j䊲ֺ3ӱDXB|*;K>o03mu&]Yw1ÿ{\..Ǭ^}jWs?^o~ - hsfz1 D-y, @L#_92Fg>?9Nx:#?Ɔ340yhM^z^տK^cA͟p?(<H/ C#LZU׷ TC[c=w7[hXxaUE-u~ 8sH⇷*T bit밫m8{\IkoL.Ƈ.k []`ivKjJm(z4n Ir?Pdk@݈oݎ.l?Uo/WU*N9{Cn7%=K7Ut<ΩUb6 \?/Y[1rfhcswk5%=RK}SŲVN]1ͿP[Sqsq Sݵ=ҤIp[bo[0忢xٶ]VoLyE06͛bJmڷu^V-yEmͰiuM{}ػ_nfK+u"Y^)h.54mIr_Ts:We=7X~9;WK-w[O@g2w1lIV_LIy߮9VȢnuw9mcgï%./{׷kZq%?>`nNgTwhkM{*?.өηr_[*_E`sX}Ic[24pmzԣ_Wv.gw}Z_9;ρR裪_0²cv:[v;ݶI})\VSHɝO>}l_Xz%'7k׿c(~Ku=/jUfGY5,,šXƏ}VWw=;RS]B9UOIUcgkxkd O,X:Vig9Aa}-?*W~:=OݓMEϦƁOѩ6-ٵ_K]EW݊s=kh۫sc=U%7WBqpMcē鼛1evІ)}o܏_k,/W3]'v} m7"̓gywNK7}\{) }պSN_m?S׺/T(=6}=VBX}U_WN.#SssZA-g腕/?'޺_eUK`H>`}/{XؼWOfKzs$Ȉ(Tk/jJz]9VŏOs謁 =ͻ6w75{?>qF>ջ9Vak[ſo}yg? FPs.s4=umyocUt}4L{^7p(}?e~Sεg|k*u}0=aӆϡw\ޛe]]-{ ڃ˚l{`i?%?Photoshop 3.08BIM%8BIM,,8BIM&?8BIM 8BIM8BIM 8BIM 8BIM' 8BIMH/fflff/ff2Z5-8BIMp8BIM@@8BIM8BIM[,,HW_POS_RGB_Vertical,,nullboundsObjcRct1Top longLeftlongBtomlong,Rghtlong,slicesVlLsObjcslicesliceIDlonggroupIDlongoriginenum ESliceOrigin autoGeneratedTypeenum ESliceTypeImg boundsObjcRct1Top longLeftlongBtomlong,Rghtlong,urlTEXTnullTEXTMsgeTEXTaltTagTEXTcellTextIsHTMLboolcellTextTEXT horzAlignenumESliceHorzAligndefault vertAlignenumESliceVertAligndefault bgColorTypeenumESliceBGColorTypeNone topOutsetlong leftOutsetlong bottomOutsetlong rightOutsetlong8BIM8BIM8BIM 8JFIFHH Adobe_CMAdobed            "?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%)$ILlu2h9赫?*GGqvl|~}l3 zEF 1K?k\Ry p@| R9b"|wc"?NQ.'`g,mwYcY}pk6?玣ӨiikGlk]ԜWh{`SyIp~a|wN8GڗAe?f6=coj_֋^,q{8\N{~pf#)uGH5*ӹ1~Q8|8pd_7Xʥ_!^kv2encĵ .+])0άm~Zc!#`Ou/1@lbJ㞤I%?T JDr2ᙾ#g{_?-f"9 ?K~LVG.b滯Ӌlo&N%?k`T<#,G'ðL%Iw iNK($6=Etu7k$w-汌|YĀ$~%]k][$EϣzJNrCh9OOλ'roKKOO/pㄿr_?װ1*vnh{]?d2kvh?+iz'/r~#˔g{_UZ}Y:BZy/4McFeD`Wo.e9FZK&3y'L{{la-{Hs\ F*)s Cy}r~;j-kAѶ,wk6}V V״;\՞+;fMm[Fh=1d4rO| 9s3vF$ڜ_neCu%{To=GIпSCI?-Rku7[Ŧk:^/L?9os0GXk734{KyqTTlP'^x]{:NsLP$x9ku̇`E?S_kt+=Lg)仟N5+?/Y)2kyu{>>XܼGAwޯjxDXӋ|G&#<} 7˵W^?V nDhfYe_ָ5qck>h?6Nm0c1o,s)wos00dB_2Gv5cݗR4plHn#Ui[]zeT}h&ߵ ʬ-nIf}A6;2_O3;(kO1kcXeф8-͒xʏjmʪik$k@fmc7HF6\ymKW R2gQI=y`!F(B-pB? 
~[֗Ӭ~[RJպOdgۣypo;Z=W 7X\CZ$>)iLGCg >y<{.O|Yjts(SZM`-1y {s<޳gW:׬ޝnT s h}=OԿ>@YO zG>#<8>/,OӟLe/jX saY˭wIds~Uߝomr̵ٙ\۝EC}tWX7j8v| sri\eؗ?-cs6ͼ =1XXaT۱n+w\{XUPQ2vYXoJ[YǗِn>#.GH|Mrst{|K Ԭǁ3US[],p?cMxU164!cWW֘0:a۞ͺ{j,^+tߵ2\oO~~ѹT ۩xs0Fm=ޛO(Zw9_W%><}yLuN}W΢pAT9eg@}ruZ0{}{710~uh!7 x3㏥~Oֺ}p?YNJ f1Q{p?Ã< ~aWcZeY_"^'3uԑ]pfn?o۠nsdu`sSnnS=Gb2L|@K'-H19gN?]wX-k>ψ'"oU_ӻ?­}V,7Wl!yʱD}21쾚\vX]emr}v'-#UL5S(G'} :5M&L̋N'H<q> emk}Fgmşc&EFie ?X= :6ܗ{wTc[˄y_^_?.4WީV>=Am82X罶?/I%d dFF?(RN/j䊲ֺ3ӱDXB|*;K>o03mu&]Yw1ÿ{\..Ǭ^}jWs?^o~ - hsfz1 D-y, @L#_92Fg>?9Nx:#?Ɔ340yhM^z^տK^cA͟p?(<H/ C#LZU׷ TC[c=w7[hXxaUE-u~ 8sH⇷*T bit밫m8{\IkoL.Ƈ.k []`ivKjJm(z4n Ir?Pdk@݈oݎ.l?Uo/WU*N9{Cn7%=K7Ut<ΩUb6 \?/Y[1rfhcswk5%=RK}SŲVN]1ͿP[Sqsq Sݵ=ҤIp[bo[0忢xٶ]VoLyE06͛bJmڷu^V-yEmͰiuM{}ػ_nfK+u"Y^)h.54mIr_Ts:We=7X~9;WK-w[O@g2w1lIV_LIy߮9VȢnuw9mcgï%./{׷kZq%?>`nNgTwhkM{*?.өηr_[*_E`sX}Ic[24pmzԣ_Wv.gw}Z_9;ρR裪_0²cv:[v;ݶI})\VSHɝO>}l_Xz%'7k׿c(~Ku=/jUfGY5,,šXƏ}VWw=;RS]B9UOIUcgkxkd O,X:Vig9Aa}-?*W~:=OݓMEϦƁOѩ6-ٵ_K]EW݊s=kh۫sc=U%7WBqpMcē鼛1evІ)}o܏_k,/W3]'v} m7"̓gywNK7}\{) }պSN_m?S׺/T(=6}=VBX}U_WN.#SssZA-g腕/?'޺_eUK`H>`}/{XؼWOfKzs$Ȉ(Tk/jJz]9VŏOs謁 =ͻ6w75{?>qF>ջ9Vak[ſo}yg? FPs.s4=umyocUt}4L{^7p(}?e~Sεg|k*u}0=aӆϡw\ޛe]]-{ ڃ˚l{`i?%?8BIM!UAdobe PhotoshopAdobe Photoshop 7.08BIMHhttp://ns.adobe.com/xap/1.0/ adobe:docid:photoshop:e650ea62-d67a-11da-bb5a-e2fea8b761e3 Adobed            ,,"?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%)$IJI$RI$I%)$o]} v>?@I4Xqǎ&sundlm50Kzz©ƾKHo׺Z7en`K{j5;rw8sGܟq }t2sq{TQ??(msIWI?WB< _?ϡR颫]7O%|[~>P&+͟ס-['1{^O⼸˄T2~?_?)$S4>fVkcXepcA~'g>aU9/bK)ԏb*I^KmžZ-r-~z,^d.HFpVNQdi+XMOCߣG?%@b~oI+-%$I)I$JRI$I$$I)TI%)$IJI$RI$I%!ʣܬlAx]9=g?.C>5c?55u=oM;gI;y#a2LC#؀v* VXb-Y+%99A ӰFV9+,]WDCcJ'*.1qaN_I$p>ݻ"q9Y YzY27}$ř\SvC #k/QJƢ&b0MY)k/jUނ[Zqm2 a= u2}&^ܶN(9 .I%aI%)$IJI$RI$TI%)$IJIBk[k+`.{ݠj\Wesplu4.{ÿ\,NrI|zO`` tfMP܊l)#! $ ͆xrKA8$HK(dNڛOȳf,|#b7%WFČGbvIX1VaGaE0b~FFEƼ|Zv*·>a.Y"|kg38l_z^=*,] DT:ݾKj2=/+iU$Wu$Jc/)XYͱ oO3yrl6.K'5ggIaYέGq Ψ~LN"Q6 |0d,Q\QRI$ĤI%?TI%)$9Țǁu)0alŌ\_wFj?<~sWH8d%%!#"%cpG?s)CuA4aL?CWoiI-'RI$TI%)y/^z\tM5Wjk7'/358ѵ׉IdJHþ, |:d$I%_Y>ی:VS'q:׫=xn&Uy5eceԸ9 zX*m>?+C |/~~)O/Gbt/ð8Q/My,Z1.ŴMwջxf-Wb"iڡaR_PŞgN\V)qO q 9Uv9&Ŷ"hUVE(t?a1qQ?{K=NYR)/+C?yYww~gxVw9Af<2u~ {Pd靈܂'{^U݄X*or }JǩJGn6ok}y5u[ոVg^9y;I><_JdN`v.d=>[z}-4k7=5ĝ@޷tN[Ӊ6gOL":.bqyec&`S9{YZ:x[*?KjHZY4cٓ{USK u[uvl{1F;Cyrfˮ89Lpߵ<'=ECssF .rNr4b9A$evX4Dž>~a05^9/?35$JӂTI%5320Bڬhen,{Obӵ{oN.h7Gmx"cww61/#C6i/0N=h]$\I$˿ *wluYgWM_dщC]<6+c>q;!|3~~Sc6]嗲b *Co7XW`-{ kyhB. 
eת~}n sbI(vAЈנA ~`ş.+뻣%Ԯ&?S[^?|$_KERUԃ0lKz-l95@^:`}.b?JF ~Nu0~m>G_W9XFYeyߍel>X3i/?C'}kJw!wkծD /{Ku$I*Lr2=]O-#/ߟI#JclAI/RI"cݕ^5 /1) $^Zt윞kh{5oUҪ2,cy[l1>o\L:I%#I/qͽ#*Bڶ͗8o_ qk AxG\vgZqRn-ow}kc[󱬗V?aԈ!ܟ8BXeY8G'Uցf&Kݯi_Յ~t~]5VV[O ?xV+?W<KG)ToDr.Z_m cGw8UuY kNjmm~,g$GɩP9y芄!"S]X#mmny[a^e_c8x utrڿ;Ecb#decOVE餦ITz%$I)I$~Uǭa1I) $Q{ hԒ{R_ h0iѯ?S)!m#9mSg"[?u3xKɗ!UzSnm,|W ?&VF:C=GC67TɚcgPu/N8_=V/׻6ב-ZUF_dnݿ[un{\I>n+[훣;7Gtnw%qPNiKՐ@178ڗ?ҧi8G+W9M+:A1N=҂^Xb =NV&.`0Sq4|$Jољa7Al޽ 0gk`3/wdy.`h, 3:<gWi )k_ >{xG1%iЂ9K*g=+_W+lvܦ>nW0檓a9nv)i&qzP#p>t<k~Y \̫wIƷ/E៓|\?{QQUWĽ'~#l3‡|xJ0IԳ7f .#k^/vv]waU~ں=.Gw&eã?KygWԒJM=C_`]h֏XI,Sa)F1G#[=TN4lwWaпwEC?η^uzF0.XL~mU+xM8/g͒^7^ZK=ld%Mnt`Ykl5iq{`gnt6O\??$8:y%:|`8pы~b~2Hk91]soU$zW}+V"I*rnFa XIɖ~CX71<PA+>YuG1' Νqkau 0s[5V}&8}+?̘LjٵsggXKISiZ_Xq򛺧d4{_HJb^n=Fp2=:dfPmԼhAV 0LG¤1 G[[wn\K!pOL7ղ1ʢ{U,{O= žέ4Y6z>]YtSopi}k}7 lwW2bN~9y1,'P?#?IFϫw+ky?U W1Vǀc_NJqM~W?b\^:+1?ie226*:o]'Rϧ`ߛqP?ڬ#`dۏаl|]s ڛr8=4֑m>%σЙɯ {_Kfe]nUu<̔0.bw `.r-wWg?g{>޻Lq{Ͻ>Rs~zs.=\ ӈ:`_x.gƨJh鸻)ex 1 h_^3Mnw2cD _k:^֟՛hY>_=Lg)/Nn>W?ɞg/i7wcf{[?kظLܼF]r@ITɚyۣr_H(z͒^ I$I$)$$R@@It/A1gTꌋߙ hI&,R*S٧sعLG&CCKu>\=ߒ,gS~t$~?)~w5^Ucts \>-rAsOLLoms亏[ǁ[4fgR\]l?R2T^M>Σ`uߚdhEv['! jh'CJƫ,_m(lQG'?Y/Cw 㼇&Oϥh~Ժ5:vߗan7X?4b兟ɤ9l|?|3$L2OXGc>Ci\Gw> A\@HMu7,mOGgp7W諘Z r"\|RA:f8-BWFd/\;?V! zƯ9.?, pkX?T矃>ʐ YG>&K!F!7~/h/0I/~}|$'ޚޛ)&?{ g%jŸ߻kfP}j}L@L9b? ̏l֏'E0 A$Ԟ쾭]Wv.)x ?D<5fC.,qRbZ>uCUM`1o;^XVa=š u6Y#y8J##"{;1 qDG^9$IHRI$I$EYjnOEUm>E7-\`Xa‚G cvEuZ=?3c}XGos^L71 h}*1?wTAo^zNGIߚXտRL |^? nH9FZD, 6~O+O\)}7~?Y?@oҫsV&?Ǝ`X>ir߽4Gy?IoG^>N@?JS ?OrMۈILOI^Q+VN^@l[߫}O|Oǟ,"+js /Yfr/шnhtЉRI%uTI$TI%)$IO+m}wx3iZ螼.ǹ^UudC 3`g!-ejCڬyu2D[#.nMr?U qD! z'2zGSz=?Z=c?Z!dԵDzޞ %{׳zj-zC?yKhƭXV>M\e>3#_~A|W>tΊ}e }gkO TVc@S6L3#9I$,jI$RI$I%)$IM|1ݍSnܵÃ?IϬ?3vOI.hOo-%L05!rr/:SJ>Ku_}~LiTomη+gqZA.jNZpzpܟy~`Y?rgO&ҔrJ6RSU vWžUnO[&y?wcM? R;Y?/S'd,TMw _k1šSCX šնkY[DUX1ܝNo%N8ԒI)JI$STI%)$IJI$R"ASc:u{s[Uk$-tg(x\:>U:uN?=lSE4V+Xe$<)KljI$+I$$I)I$JRI$I$$I)I$JRI$vfVa6Q7jݸ`5{zI4'S2Ǚ{ Nf=ccӻk=%>}g/XQ[hu/-s~\歹b׋UyDӰmMм쏤k?%uɯI ֆZfEM26n3INI/*cF}2 R[ >׵)T dfz*6j.0wzRIyֿѺ_LĢq\X_kM%>~61}ױ"*;Ů$Z6&1߽S s{{kzJwR^9w\_~ұ2m9W-~t))]t,oLƝ^?6+_s:gN}mVjo|IOp>ޝߒǻ~/gM%>ƒ7R]>fW}t/˼׫$$?su\fc9Zl>${WS7|Jo)6K_+տ'_3eS3ŵEe %+KyS|Ouޗ%>waWL˾cIX~֬1/`۵gNl D`~8_I{?XbUC.Cw5$e㧢=KV=l.?YN[5uމ*cG=365?`t_?UsqRߴQ㾯|J ՏFÃfM|wܾ84:8!|_<fYkWܼؒ1.ʷJu{W9vfߙi2,}>o%w=Sř<eºnC6dʛ{?Jk{\u/8ո͸w{^??u zO#ӫ5oIL>ts־`w[gf'X,)ŬhIB*I}uŠ>=A}_adKSwRJ|G=X}TcUQ֋piŨ* m\ϪoHީWKM}GR~$Fմz]ۻG+wK.x}UlOy$_IO#?LꘝEQKpٻvнeQE̢eָ2kKZփU%?0g? VM\_ʚJ7|J6?Oa~65.xX %{7g?'^p2R?Oy$Z2hZ,c ~ձ# x+&.C/,:GgIOнCO&]fK7/ IiЃ%>'3ovu[bPG s]JRhK˯3TU#Ʀ2ӭ%;_gvK?^ܼ[Jdʻjp?>^0~~}nۗ/u&$G/ۿYn5;vKhO#]UC0i6X?IcCU=Ki))u[=s\p6cg0IG]nދZD)ykVkвDm]wbulK'Y?͟I)H}gVY3.:{}qj!$SZsnTε,᛿ܩ7|Jo)6Kʝ7dDoO}__OF}m>_GԔ$J|x+&.C/,:Gg]^}+6z;"77zuC^YgSTs>utܫf-KFUMSwGO+oYլtxu/] @x`GH;}Kؿm&}(?ӮJOyl'IOurGKfx-7^+޼cǟ$96w6O c_i*jOD_TzK;=ޠZ}'}&Tݱ~pkO?tjӃs;0kro]=ޗJϥK؞hoޮOvV.:֚ ݍ>p:XG\Y_g? Ͽ_uziYwl} ng? U]Sh&V\HcI)s; kUM;J?1{F,cԳ;GfSh=oξ+jGp~G?CIO}jWzi&+JO֯1Ŏi/WZ^ڶ=~ڒe}+Ӱv)Z;ұx>n?KgӍWM۴m1x$?ݟKZUM@q?'O}?~ڽϣߣ/IO/R}lV7FA#oy {41?gnYo[IOTu./mߏ]8r75YI%?8}jԾceĜl=7־r?fRᾷջzoGOF/޼S6>e~Й3ӤE+Zp{mG'ǯ6,7gOGi/J{7zR8=>LVՕ}tn {[6ݪ_z?k~NޞIO=kY7չS[_lkwG  %?PK!  
xl/styles.xmlX͎FG; c EHf"务[Ƃ(r܇-mR#U`0V33bꧻxU-J3 .2r?=Di*ijse YZ͵eɚq}UnNVVE`ZzS8Qȭ1cUBYVJUI*0a>qig2.x;D#) [ jjgK |N??9 :jJ ܙ Unjuڍ[e^VB4FT!e5Q(/1Md0~ol4.^U1%SV)솮mܒ泜g rYUeTY qC8 =^t2CnPߧ {#a~|b% ..yD*)oj'20mCƙ.Fپ,měM UM$ KoYT*> ?U6t^: 6QrI җ5g- .%5YJ9trvÿn由܊\ ٟZ1D^u.\L4ݱ56/ᎈmw *-YC m~jTq{0.dw< jl 3X9'jB1HsGq -tZ4g1QPdJrZE ?e|xhO}q&TyhaC D!ZmĬ˜}~: ZS}C T9}v>mkqBx}:?hG}=CacLKGݨvPr` 1?-FrZ~d%75_c[n{GS&wGpq{lߠ[=MK!L]%W2Oߵ߽m%uCs>#-0bY{{Ƶ(r=xt4w֛,`w?,>w(x.dwnC*\*`J _ePK!e,;xl/drawings/vmlDrawing1.vmlT]0|G?X^(Mz E: XdӚ:v7~=k;wV{gg'NZ1jmsZb pZ8S0uGsHSU,.ȁgTgj2 (3-mJ!㲬Esa@c6FJ5deg1g1tr>JouX9# DѮP Y#pzv>V}\pVIU9f>#Hp6T֭.$ueŔъف,q͒>}ښ/aU,XSG1xB "j^)i=ł7L.PJhV,stVmqMGG]B!I&xbeE)Ac0?#C0ZCީ9\L94p !,ƲGzS<;?n]7.MF4ܫ.jTAg3E]I=PPa<4y>̮7qp[O8.K&Ab~~i:LV'cqۧ`QBk5[BrܓtHx?PK!jxBExl/sharedStrings.xmlXMoIG(aב Ș{h1dsr o꽪UB-F^M@pdys9I9+d'|y_&)|X0Nny~#DPev3ŤN 7下ﭴ^~2x9b OoNB/{5~cwx)kUq,~=ycKCn0AsaʇI?#m&F?4^YV,TM8Z˻¡Pw*u&/<]*hM;x*d XX/ǜˀ(W ,d"MK( @MRGYb4Bs{R0< zCAjQFK?𹓖Tp>\- ?|9L#Rxlbxl{[iQBd U΅ HŇ N4F2J/)}0T٦PnH;6] ͦh ߽&hDzW獹%{|J+^跍@*[ag+T66NQKB;v v3ϳ :,$ch_E KQF-f٘W"H˅96]8ďO EXPȒ0sJ@".p*qQJ˼3zصȍRDEw]b 8F9Sz1Nq cXU kRf8ȉ8=5% 8w=Lf/wg3biCݑPX8aE;_LЇQl}D;QPNJ2 'i?Rp7AOY\-z=X?rDF{GoHb8ǔd%1%(n*ݣtIK:Ñ ro#XNjk-W:zپQwd(Cl#^PT8(˜CJ|]ht EqF PK!K[q"xl/externalLinks/externalLink1.xmlkA2nf C Yag'I-i RRx)ARҘH'I gigvRO}ޏo6]GӀپg"@ʾe{&zE1N<8GMEZ*޾UqVm&xDUky]g*u [kԓ'?p fZ@ŪrэD"Pq|2]~K= CUCZ-+ugNq}"-DZz''w! $vNr$1qQŁؽ)M =sn3{w y,QhØdQ;Wߢy(^_?ư!}5/NS>z cVIC?V ?ŸQ UԤ`SL'|WJ!@zE׋=AaN袔%F C`8 ] f$*!fT2p1G1 I(s؇zqo \P)#*$3$s02U;D&ФҒ<3L̪TG0;(5$RZZCJsrRAPK!ޫE-xl/externalLinks/_rels/externalLink1.xml.relsAKAAaoV20 23v.EPحcjߢ.~b7#*dș Fe.B[h HPJT d&&-(lH71P'm.bt+|H!>,9^`UBh}6oCV{S8`$KTDTYkr%Qt 7w(M 8g,~=+pڤ6NǏp2Z^7\qJڨuQ?f/uɹ#{o,$nPK!bX`'xl/printerSettings/printerSettings1.binJe`(bdc(aPA@ΐ!dRj42\|`da`fdm‘d3D0120If ϑ:#0>AaN袔%F C`8 ] f$*!fT2p1G1 I(s؇zqo \P)#*$3$s02U;D&ФҒ<3L̪TG0;(5$RZZCJsrRAPK!4odocProps/core.xml (QO0MK߷cҌ(⃒ִXܺ- ]sDsۦcYxP: DH^\ngmiä`E%aN?4}cb2| qݗ7g,K\wP$v%_D42p`[İ<-4*Hv>6 )hz׀C> {WAu^ hM+;PKQzT4 ȷ2.kB`tטkEAkлh sǩXkeaI'Z T7͕21I>f|yYI%8 ֪hCچmH}}Cp҇Z[DZy#}` aJX>+_=À;,}hO1 nuw}N 8iQ*BEqOqKs3Yn@u[Va|o&i3PK!CodocProps/custom.xml ([XMTlnrhuP@Q/KA3}Lua&xO,O0 @ ɣ v0! P恛L K7`ne! eȏAoryp$WK"+TlX|azu_%u,-oaV&!13IbfHlHt(7$xLD/lP}AfA2z-T镣zD?9-T^ dG{^543تtag/cO:/d:ފHE:sr:$Ts)j5n.t!u| jؖF+f/9Q(*/Y++6QܝR ={vQ/LrڽL(Ռ@WIEDP{팮3+7mEňVG {ZM]mA ;b'?OUsHӁ]ӂ8OVfpI~!ۭX_Ku>k;%=#ɞv&$h W7X]TC; u}Z- H>Fz Է{d"|Kx0S^N=Oλ\)[Es*!CbR_{'ܝjy #㑹B+3!C:zJ{Lxι`)ΏjfB9f2Gpοēc7;raC _+}#GIQ@0bK7PK-!#3[Content_Types].xmlPK-!^e _rels/.relsPK-!rxl/_rels/workbook.xml.relsPK-!)xl/workbook.xmlPK-!'1TQ xl/theme/theme1.xmlPK-!5_ ? xl/worksheets/sheet2.xmlPK-! #xl/worksheets/_rels/sheet1.xml.relsPK-!%&xl/drawings/_rels/vmlDrawing1.vml.relsPK-!#xl/worksheets/_rels/sheet2.xml.relsPK-!%& xl/drawings/_rels/vmlDrawing2.vml.relsPK-!~T[+~ 5,!xl/worksheets/sheet1.xmlPK-!i=+xl/drawings/vmlDrawing2.vmlPK- !''.xl/media/image1.jpegPK-!  
UVP-Tools-2.2.0.316/install000066400000000000000000003112721314037446600152020ustar00rootroot00000000000000#!/bin/bash
###############################################################################
#   History: 2012-5-5:
#       install kernel modules and services for pvdriver
###############################################################################
#cut don't change the next two lines. when building the "install" file for each OS distribution,
#cut this line will be changed by the command " sed "/^DESCRIPTION=/s/.*/&${dscrpt} "
#cut means this line will be cut when the install file is built
DESCRIPTION=

shopt -u expand_aliases

### ubuntu14.04.3, debian8.2 and Fedora22/23 have no procfs xenbus interface
NO_PROCFS="false"
if [ -n "`uname -r | grep "4.2.3-300\|3.16.0-4-amd64\|3.19.0-25-generic\|4.0.4-301"`" -a -e "/dev/xen/xenbus" ]
then
    NO_PROCFS="true"
fi

### debug switch(log/all)
# log: output log to stdout
# all: output log and execution detail to stdout
DEBUG_SCRIPT=''

###############################################################################
### installation configuration
###############################################################################
PACKAGE_NAME='PV Driver for Linux'
PACKAGE='pvdriver'
INSTALL_PATH="/etc/.uvp-monitor"
WORKDIR="/tmp/$PACKAGE-$INSTALL_VERSION-$$"

### package
INSTALLER_DIR=$(cd "$(dirname "$0")" && pwd)
INSTALLER=$(basename "$0")
UNINSTALLER_DIR="$INSTALL_PATH"
UNINSTALLER="uninstall"
UPGRADE_VERSION="/etc/.tools_upgrade/pvdriver_version.ini"
LAST_VERSION="/var/run/tools_upgrade/pvdriver_version.ini"

### 0: change the menu.lst
kernel_param_flag="0"

## free boot disk space needed: 25M (25600 KB)
BOOT_FREE_SPACE=25600
## the common compression ratio of initrd is 2.5, we set the ratio to 3
INITRD_RATIO=3

###############################################################################
### hide suspend/hibernate
###############################################################################
SUSPEND_PATH="/usr/sbin/pm-suspend"
HIBERNATE_PATH="/usr/sbin/pm-hibernate"

###############################################################################
# environment
###############################################################################
PATH="${INSTALLER_DIR}/bin:/bin:/sbin:/usr/bin:/usr/sbin:$PATH" ; export PATH
BALLOONMOD=''
HCALLMOD=''
VMDQMOD=''
SCSIMOD=''

###############################################################################
### error definition
###############################################################################
ERR_OK=0
ERR_NG=1
UVPMONITOR_OK=3
ERR_CANCEL=11
ERR_INITRD=12
ERR_ABORT=12
ERR_VERSION=14
ERR_TIMEOUT=16
ERR_FILE_NOEXIST=20

###############################################################################
### debug
###############################################################################
###
Info='eval 2>&1 logger "[pvdriver-$INSTALLER:$FUNCNAME:$LINENO]"'
if [ -n "$DEBUG_SCRIPT" -a -z "$DEBUG_INSTALL" ]
then
    export DEBUG_INSTALL=$DEBUG_SCRIPT
fi
if [ -n "$DEBUG_INSTALL" ]
then
    Info='eval 2>&1 echo -e "\033[32;49;1m[$INSTALLER:$FUNCNAME:$LINENO]\033[39;49;0m"'
    WAIT=" [waiting] ; read"
    test "$DEBUG_INSTALL" = "all" && set -x
fi

###############################################################################
# Common functions
############################################################################### ## remove line from a file remove_block_line_by_line() { file_path=$1 line_names=$2 eval line_name_arr=($(echo \"${line_names//:/\" \"}\")) for line_name in "${line_name_arr[@]}" do exist_line=`egrep -wn "${line_name}" "${file_path}" | head -1 | cut -d":" -f 1` if [ -n "$exist_line" ] then sed -i "${exist_line}d" "${file_path}" > /dev/null 2>&1 fi done } ############################################################################### # Common functions ############################################################################### ## kill all processes kill_proc() { local proc_name=$1 killall -TERM $proc_name > /dev/null 2>&1 for ((i=0; i<3; ++i)) do if [ -n "$(pidof $proc_name 2>/dev/null)" ] then killall -TERM $proc_name > /dev/null 2>&1 sleep 1 else break fi done if [ -n "$(pidof $proc_name 2>/dev/null)" ] then warn "kill process $proc_name failed" return 1 else return 0 fi } ## compare version # version1 = version1 : return 0 # version1 < version1 : return 1 # version1 > version1 : return 2 compare_version() { local version1="$1" local version2="$2" for ((num=1; num<=100; num++)) do num1=$(echo -n "$version1" | sed 's;[-.]; ;g' | awk "{print \$$num}" | sed 's;[^0-9];;g') num2=$(echo -n "$version2" | sed 's;[-.]; ;g' | awk "{print \$$num}" | sed 's;[^0-9];;g') if [ -z "$num1" -a -z "$num2" ] then return 0 elif [ -z "$num1" ] || ([ -n "$num2" ] && [ $num1 -lt $num2 ]) then return 1 elif [ -z "$num2" ] || ([ -n "$num1" ] && [ $num1 -gt $num2 ]) then return 2 else continue fi done } ## get binary path none_binary() { return $ERR_FILE_NOEXIST } which_binary() { local binary=$1; local bin_path='' if [ -z "${binary%%/*}" ] then if [ -f "$binary" -a -x "$binary" ] then bin_path="$binary" fi else eval path_array=($(echo \"${PATH//:/\" \"}\")) for path in "${path_array[@]}" do if [ -f "$path/$binary" -a -x "$path/$binary" ] then bin_path="$path/$binary" break fi done fi if [ -n "$bin_path" ] then echo "$bin_path" return 0 else echo 'none_binary' return 1 fi } uniform_string() { local input_string=$1 local output_string='' for word in $input_string do local word_regex=$(echo "${word//\//\\/}" | sed 's;\.;\\.;g') if [ -n "$(echo "$output_string" | sed -n '/\(^\|\s\)'"${word_regex}"'\(\s\|$\)/p')" ] then continue fi output_string="$output_string $word" done echo "$output_string" } ############################################################################### # Config File Editor ############################################################################### ### modification operations MARKER_BEGIN='###pvdriver' MARKER_END='###pvdriver' MARKER_COMMENT='###pvdriver#' MARKER_WARNING=' do not change this comment' check_block() { object_file=$1 if [ "$INSTALL_ACTION" = "uninstall" ] then test ! -f "$INSTALL_PATH/version.ini" && return 0 fi test ! 
-f "$object_file" && return 0 if [ -z "`cat $object_file | grep -w $MARKER_BEGIN`" -a -n "`cat $object_file | grep -w $MARKER_END`" ] then comment_line_num=`grep -wn "${MARKER_END}" "${object_file}" | head -1 | cut -d":" -f 1` comment_line_num=$((comment_line_num-1)) sed -i "$comment_line_num i${MARKER_BEGIN}${MARKER_WARNING}" "$object_file" fi if [ -n "`cat $object_file | grep -w $MARKER_BEGIN`" -a -z "`cat $object_file | grep -w $MARKER_END`" ] then comment_line_num=`grep -wn "${MARKER_BEGIN}" "${object_file}" | head -1 | cut -d":" -f 1` comment_line_num=$((comment_line_num+1)) sed -i "$comment_line_num a${MARKER_END}${MARKER_WARNING}" "$object_file" fi } insert_block() { object_file=$1 anchor_address=$2 block_text=$3 test ! -f "$object_file" && return 2 sed_cmd='a' ### if [ -z "${anchor_address}" ] then abort "insert_block anchor_address=${anchor_address}." elif [ "${anchor_address}" = '0' ] then anchor_address=1 sed_cmd='i' elif [ -z "$(echo "${anchor_address}" | sed -n '/^\s*\([0-9]\+\|\$\)\s*$/p')" ] then anchor_address=$(echo "${anchor_address}" | sed 's/\//\\\//g') anchor_address="/${anchor_address}/" fi ### if [ -s "$object_file" ] then sed -i "${anchor_address}${sed_cmd}\ ${MARKER_BEGIN}${MARKER_WARNING}\n\ $(echo "$block_text" | sed ':a;N;s/\n/\\n/;ta')\n\ ${MARKER_END}${MARKER_WARNING}" "$object_file" else cat > "$object_file" << EOF ${MARKER_BEGIN}${MARKER_WARNING} $block_text ${MARKER_END}${MARKER_WARNING} EOF fi ret=$? if [ $ret = 0 ] then return $ret else abort "insert_block $object_file with ${anchor_address} failed." fi } remove_block() { object_file=$1 test ! -f "$object_file" && return 2 while :; do marker_begin_line=$(grep -n "${MARKER_BEGIN}.*" "${object_file}" | sed -n '1p' | awk -F ':' '{print $1}') marker_end_line=$(grep -n "${MARKER_END}.*" "${object_file}" | sed -n '1p' | awk -F ':' '{print $1}') [ -z "${marker_begin_line}" -a -z "${marker_end_line}" ] && break if [ -z "${marker_begin_line}" -o -z "${marker_end_line}" ] || [ ${marker_begin_line} -gt ${marker_end_line} ] then abort "remove_block $object_file format failed." fi if ! sed -i "${marker_begin_line},${marker_end_line}d" "${object_file}" then abort "remove_block $object_file failed." fi done } insert_word() { object_file=$1 anchor_expreg=$2 word_text=$3 test ! -f "$object_file" && return 2 if [ -z "${anchor_expreg}" ] then abort "insert_word anchor_expreg=${anchor_expreg}." fi ### anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') if ! sed -i "s;\(${anchor_expreg}\);\1 ${word_text};g" "$object_file" then abort "insert_word $object_file at ${anchor_expreg} failed." fi } remove_word() { object_file=$1 anchor_expreg=$2 word_text=$3 test ! -f "$object_file" && return 2 ### anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') if ! sed -i "s;\(${anchor_expreg}\).* ${word_text};\1;g" "$object_file" then abort "remove_word $object_file at ${anchor_expreg} failed." fi } replace_word() { object_file=$1 anchor_expreg=$2 word_text1=$3 word_text2=$4 test ! -f "$object_file" && return 2 ### if [ -z "${anchor_expreg}" ] then abort "replace_word anchor_expreg=${anchor_expreg}." elif [ -z "$(echo "${anchor_expreg}" | sed -n '/^\s*\([0-9]\+\|\$\)\(,\([0-9]\+\|\$\)\)\{0,1\}\s*$/p')" ] then anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') anchor_expreg="/${anchor_expreg}/" fi ### anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') if ! sed -i "${anchor_expreg}s;${word_text1};${word_text2};g" "$object_file" then abort "replace_word $object_file at ${anchor_expreg} failed." 
fi } comment_on_line() { object_file=$1 anchor_address=$2 anchor_expreg=$3 test ! -f "$object_file" && return 2 ### if [ -z "${anchor_address}" ] then abort "comment_on_line anchor_address=${anchor_address}." elif [ -z "$(echo "${anchor_address}" | sed -n '/^\s*\([0-9]\+\|\$\)\(,\([0-9]\+\|\$\)\)\{0,1\}\s*$/p')" ] then anchor_address=$(echo "${anchor_address}" | sed 's/\//\\\//g') anchor_address="/${anchor_address}/" fi ### anchor_expreg=$(echo "${anchor_expreg}" | sed 's/\//\\\//g') if ! sed -i "${anchor_address}s;\(${anchor_expreg}.*\);${MARKER_COMMENT}\1;g" "$object_file" then abort "comment_on_line $object_file at ${anchor_expreg} failed." fi } comment_off_line() { object_file=$1 test ! -f "$object_file" && return 2 ### if ! sed -i '1,$'"s;\(^\s*\)${MARKER_COMMENT}\(.*\);\1\2;g" "$object_file" then abort "comment_off_line $object_file at ${anchor_expreg} failed." fi } ############################################################################### # check version of pvdriver and linux ############################################################################### SYS_TYPE="" #debian,gentoo,redhat,suse,lfs,pardus,bsd INIT_TYPE="" #sysv,lfs,pardus,bsd KERN_RELEASE="" CPU_ARCH="" #x86,x86_64 LIB_PATH="" check_root() { # check root privilege if [ $(id -u) = 0 ] then return 0 else return 1 fi } # get type of unix-like system check_dist() { local issue # singleton execution if [ -n "$SYS_TYPE" ] then $Info "check_dist already called before" return $ERR_OK fi ### determine linux distribution issue='/etc/issue' if [ -e '/etc/debian_version' -o -n "$(grep -i 'debian' $issue)" ] then SYS_TYPE='debian' KERN_RELEASE="$(uname -r)" CPU_ARCH="$(uname -m)" INIT_TYPE='sysv' PIDPATH='/var/run' RC_SYSINIT='/etc/init.d/rc.local' elif [ -e '/etc/redhat-release' -o -n "$(grep -i 'red *hat' $issue)" -o -n "$(grep -i 'GreatTurbo' $issue)" ] then SYS_TYPE='redhat' KERN_RELEASE="$(uname -r)" CPU_ARCH="$(uname -m)" INIT_TYPE='sysv' PIDPATH='/var/lock/subsys' if [ -n "`uname -r | grep "3.10.0"`" ];then RC_SYSINIT='/etc/rc.d/rc.local' else RC_SYSINIT='/etc/rc.d/rc.sysinit' fi elif [ -e '/etc/SuSE-release' -o -n "$(grep -i 'suse\|s\.u\.s\.e' $issue)" ] then SYS_TYPE='suse' KERN_RELEASE="$(uname -r)" CPU_ARCH="$(uname -m)" INIT_TYPE='sysv' PIDPATH='/var/run' RC_SYSINIT='/etc/init.d/boot.local' elif [ -e '/etc/gentoo-release' ] then SYS_TYPE='gentoo' KERN_RELEASE="$(uname -r)" CPU_ARCH="$(uname -m)" INIT_TYPE='sysv' PIDPATH='/var/run' RC_SYSINIT='' else $Info "cannot determine linux distribution" return $ERR_VERSION fi ### check architecture LIB_PATH="/usr/lib" case "$CPU_ARCH" in i[3456789]86|x86) CPU_ARCH="x86" ;; x86_64|amd64) CPU_ARCH="x86_64" test -d "/usr/lib64" && LIB_PATH="/usr/lib64" ;; *) $Info "cannot determine processor type" return $ERR_VERSION esac return $ERR_OK } DEP_CMDS_LIST='' DEP_PKGS_LIST='' check_tools() { local do_query=$1 check_dist ### DEP_CMDS_LIST="basename dirname id mv rm rmdir uname lsmod rmmod depmod modprobe pidof xargs find tar sed grep awk logger" case $SYS_TYPE in suse) pkg_query='rpm -qf' initrd_tool='mkinitrd' ;; redhat) pkg_query='rpm -qf' if [ -f "/etc/dracut.conf" ] then initrd_tool='dracut' else initrd_tool='mkinitrd' fi ;; debian) pkg_query='dpkg -S' initrd_tool='update-initramfs' ;; esac DEP_CMDS_LIST="$DEP_CMDS_LIST $initrd_tool" ### DEP_PKGS_LIST='' if [ "$do_query" = 'noquery' -o "$(which_binary $pkg_query)" = 'none_binary' ] then pkg_query=':' fi bin_need='' for bin in $DEP_CMDS_LIST do bin_path=$(which_binary $bin) if [ "$bin_path" = 'none_binary' ] then 
bin_need="$bin_need $bin" pkg_info="no_package_found" bin_path="$bin" else pkg_info="$($pkg_query $bin_path)" fi case "$pkg_query" in rpm*) DEP_PKGS_LIST="$DEP_PKGS_LIST $pkg_info: $bin_path" ;; dpkg*) DEP_PKGS_LIST="$DEP_PKGS_LIST $pkg_info " ;; esac done ### DEP_CMDS_LIST="$(echo "$DEP_CMDS_LIST" | sed '/^$/d')" DEP_PKGS_LIST="$(echo "$DEP_PKGS_LIST" | sed '/^$/d')" if [ -z "$bin_need" ] then return 0 else return 1 fi } InitSystemInfo() { if ! check_root then $Info "please run this script with root privilege." echo "please run this script with root privilege." exit 1 fi if ! check_dist then $Info "unsupported linux distribution." echo "unsupported linux distribution." exit 1 fi if ! (check_tools 'noquery') then $Info "check depending tools failed." echo "check depending tools failed." exit 1 fi } InitPackageInfo() { ### get cpu bus width case "$CPU_ARCH" in x86) cpu_arch_width='32' ;; x86_64) cpu_arch_width='64' ;; *) cpu_arch_width='' esac ### source files if [ "$INSTALLER" = 'install' ] then if [ "$(which_binary get_uvp_kernel_modules)" != 'none_binary' ] then LCL_UVP_MODULES_PATH=$(get_uvp_kernel_modules "$SYS_TYPE" "$KERN_RELEASE" "$(uname -m)" "$CPU_ARCH") LCL_UVP_MONITOR="${INSTALLER_DIR}/usr/bin${cpu_arch_width}/uvp-monitor" LCL_UVP_ARPING="${INSTALLER_DIR}/usr/bin${cpu_arch_width}/arping" LCL_UVP_NDSEND="${INSTALLER_DIR}/usr/bin${cpu_arch_width}/ndsend" if [ "$SYS_TYPE" = "gentoo" ] then cp -f ${INSTALLER_DIR}/etc/uvp-monitor-gentoo ${INSTALLER_DIR}/etc/uvp-monitor 2>/dev/null 1>/dev/null fi LCL_UVP_MONITOR_INIT="${INSTALLER_DIR}/etc/uvp-monitor" LCL_XENVNET_ARP="${INSTALLER_DIR}/etc/xenvnet-arp" LCL_UVP_KERUP="${INSTALLER_DIR}/bin/CheckKernelUpdate.sh" LCL_UVP_FEATURE="${INSTALLER_DIR}/bin/GuestOSFeature" LCL_UVP_CURRENTOS="${INSTALLER_DIR}/CurrentOS" LCL_UVP_MEMONLINE="${INSTALLER_DIR}/bin/mem_online.sh" LCL_UVP_SWAP="${INSTALLER_DIR}/bin/modify_swappiness.sh" LCL_LIB_XENSTORE="${INSTALLER_DIR}/usr/lib${cpu_arch_width}/libxenstore.so" LCL_XEN_STORAGE_ID="${INSTALLER_DIR}/config/udev/xen_id" LCL_XEN_STORAGE_RULES="${INSTALLER_DIR}/config/udev/60-xen-storage.rules" else LCL_UVP_MODULES_PATH="./${KERN_RELEASE}/updates/pvdriver" LCL_UVP_MONITOR='./uvp-monitor' LCL_UVP_MONITOR_INIT='./uvp-monitor.init' LCL_LIB_XENSTORE="./libxenstore.so" LCL_XEN_STORAGE_ID='./xen_id' LCL_XEN_STORAGE_RULES="./extra/60-xen-storage.rules" local dist=$(echo $DESCRIPTION | awk -F":" '{print $1}') case "$dist" in suse*) dist='suse' ;; redhat*|fedora*|neoshine*) dist='redhat' ;; debian*|ubuntu*) dist='debian' ;; esac local arch=$(echo $DESCRIPTION | awk -F":" '{print $2}') case "$arch" in i[3456789]86|x86) arch="x86" ;; x86_64|amd64) arch="x86_64" ;; esac if [ "$dist" != "$SYS_TYPE" -o "$arch" != "$CPU_ARCH" ] then $Info "this package can not be installed in ${SYS_TYPE}-${CPU_ARCH}." echo "this package can not be installed in ${SYS_TYPE}-${CPU_ARCH}." ; exit 1 fi fi # check support if [ ! -d "$LCL_UVP_MODULES_PATH" ] then $Info "unsupported linux version." echo "unsupported linux version." 
; exit 1 fi else LCL_UVP_MODULES_PATH='' LCL_UVP_MONITOR='' LCL_MOUSECONFIG='' LCL_UVP_MONITOR_INIT='' LCL_LIB_XENSTORE='' LCL_XEN_STORAGE_ID='' LCL_XEN_STORAGE_RULES='' fi ### system files UVP_MODULES_PATH="/lib/modules/$KERN_RELEASE/updates/pvdriver" UVP_MODULES_PATH_BACKUP="/lib/modules/${KERN_RELEASE}/uvp_mods_backup.tar" UVP_MONITOR='/usr/bin/uvp-monitor' UVP_ARPING='/etc/.uvp-monitor/arping' UVP_NDSEND='/etc/.uvp-monitor/ndsend' UVP_KERUP='/etc/.uvp-monitor/CheckKernelUpdate.sh' UVP_MEMONLINE='/etc/.uvp-monitor/mem_online.sh' UVP_SWAP='/etc/.uvp-monitor/modify_swappiness.sh' UVP_FEATURE='/etc/.uvp-monitor/GuestOSFeature' UVP_CURRENTOS='/etc/.uvp-monitor/CurrentOS' UVP_MONITOR_INIT='/etc/init.d/uvp-monitor' XENVNET_ARP='/etc/init.d/xenvnet-arp' LIB_XENSTORE="${LIB_PATH}/libxenstore.so.3.0" if [ -f "/lib/udev/ata_id" ] then XEN_STORAGE_ID="/lib/udev/xen_id" else XEN_STORAGE_ID="/sbin/xen_id" fi if [ -d "/lib/udev/rules.d" ] then XEN_STORAGE_RULES='/lib/udev/rules.d/60-xen-storage.rules' elif [ -d "/etc/udev/rules.d" ] then XEN_STORAGE_RULES='/etc/udev/rules.d/60-xen-storage.rules' fi # else MODPROBE_XEN_PVDRIVER='/etc/modprobe.d/uvp-pvdriver.conf' } ############################################################################### # backup and restore ############################################################################### lock_install() { $Info "lock_install" LOCK_FILE="${PIDPATH}/pvdriver-install.pid" if [ ! -f "$LOCK_FILE" ] then echo $$ > $LOCK_FILE return $ERR_OK else echo "another pvdriver is installing, please wait for a moment." return $ERR_NG fi } unlock_install() { $Info "unlock_install" LOCK_FILE="${PIDPATH}/pvdriver-install.pid" if [ "$$" = "$(cat $LOCK_FILE)" ] then rm -f "$LOCK_FILE" return $ERR_OK else echo "another pvdriver is installing, please wait." 
return $ERR_NG fi } ############################################################################### ### exception process ############################################################################### BACKUP_LEVEL_FILE="/etc/.pvdriver_backup_level" unset INSTALLED_FILE_LIST unset MODIFIED_FILE_LIST ### init install level if [ -z "$CUR_INSTALL_LEVEL" ] then export CUR_INSTALL_LEVEL=1 else export CUR_INSTALL_LEVEL=$((CUR_INSTALL_LEVEL+1)) fi # if [ -z "$BAK_INSTALL_LEVEL" -a -f "$BACKUP_LEVEL_FILE" ] then export BAK_INSTALL_LEVEL=$(cat $BACKUP_LEVEL_FILE) fi if [ -z "$BAK_INSTALL_LEVEL" ] || [ $BAK_INSTALL_LEVEL -lt 0 ] then export BAK_INSTALL_LEVEL=0 fi # init_backup() { ### init unset MODPROBE_XEN_PVDRIVER unset MKINITRD_XEN_PVDRIVER ### MODIFIED_FILE_LIST MODIFIED_FILE_LIST=$(readlink -f '/etc/fstab') XORG_CONF=$(readlink -f '/etc/X11/xorg.conf') ### grub config if [ -f '/boot/grub/grub.conf' ] then MENULIST=$(readlink -f "/boot/grub/grub.conf") MENULIST_CONFIG='' elif [ -f '/boot/grub/menu.lst' ] then MENULIST=$(readlink -f '/boot/grub/menu.lst') MENULIST_CONFIG='' elif [ -f '/boot/grub/grub.cfg' ] then MENULIST=$(readlink -f '/boot/grub/grub.cfg') MENULIST_CONFIG=$(readlink -f '/etc/default/grub') elif [ -f '/boot/grub2/grub.cfg' ] then MENULIST=$(readlink -f '/boot/grub2/grub.cfg') MENULIST_CONFIG=$(readlink -f '/etc/default/grub') elif [ -f '/boot/burg/burg.cfg' ] then MENULIST=$(readlink -f '/boot/burg/burg.cfg') MENULIST_CONFIG=$(readlink -f '/etc/default/burg') elif [ -f '/etc/elilo.conf' ] then UEFI_FLAG="yes" MENULIST=$(readlink -f '/etc/elilo.conf') MENULIST_CONFIG='' elif [ -f '/boot/efi/EFI/redhat/grub.conf' ] then MENULIST=$(readlink -f '/boot/efi/EFI/redhat/grub.conf') MENULIST_CONFIG='' fi ## mkinitrd INITRD_FILE='' if [ -f "/etc/initramfs-tools/modules" ] then MKINITRD=$(which_binary update-initramfs) MKINITRD_CONFIG=$(readlink -f "/etc/initramfs-tools/modules") MKINITRD_SETUPUDEV=$(readlink -f "/usr/share/initramfs-tools/hooks/udev") INITRD_FILE="/boot/initrd.img-${KERN_RELEASE}" elif [ "$(which_binary dracut)" != 'none_binary' ] && [ -z "`echo "${KERN_RELEASE}" | grep "3.16.6-2"`" ] then MKINITRD=$(which_binary dracut) MKINITRD_CONFIG=$(readlink -f "/etc/dracut.conf") MKINITRD_SETUPUDEV=$(readlink -f "/usr/share/dracut/modules.d/95udev-rules/install") INITRD_FILE="/boot/initramfs-${KERN_RELEASE}.img" elif [ "$(which_binary mkinitrd)" != 'none_binary' ] then MKINITRD=$(which_binary mkinitrd) if [ -z "$(mkinitrd --help 2>&1 | sed -n '/--with=/p')" ] then MKINITRD_CONFIG=$(readlink -f "/etc/sysconfig/kernel") MKINITRD_SETUPUDEV=$(readlink -f "/lib/mkinitrd/scripts/setup-udev.sh") INITRD_FILE="/boot/initrd-${KERN_RELEASE}" else if [ -n "$(sed -n '/\/etc\/sysconfig\/mkinitrd/p' /sbin/mkinitrd)" ] then if [ -d "/etc/sysconfig/mkinitrd" ] then MKINITRD_XEN_PVDRIVER="/etc/sysconfig/mkinitrd/uvp-pvdriver.conf" else touch "/etc/sysconfig/mkinitrd" MKINITRD_CONFIG=$(readlink -f "/etc/sysconfig/mkinitrd") fi else MKINITRD_CONFIG=$(readlink -f "/etc/udev/udev.conf") fi MKINITRD_SETUPUDEV='' INITRD_FILE="/boot/initrd-${KERN_RELEASE}.img" fi else return 1 fi if [ -d "/etc/modprobe.d" ] then MODPROBE_XEN_PVDRIVER='/etc/modprobe.d/uvp-pvdriver.conf' elif [ -f '/etc/modprobe.conf' ] then MODPROBE_CONFIG=$(readlink -f '/etc/modprobe.conf') else return 1 fi MODPROBE_UNSUPPORTED_MODULES=$(readlink -f '/etc/modprobe.d/unsupported-modules') NET_GENERATOR_RULES='' if [ -f '/lib/udev/rules.d/75-persistent-net-generator.rules' ] then NET_GENERATOR_RULES=$(readlink -f 
'/lib/udev/rules.d/75-persistent-net-generator.rules') elif [ -f '/etc/udev/rules.d/75-persistent-net-generator.rules' ] then NET_GENERATOR_RULES=$(readlink -f '/etc/udev/rules.d/75-persistent-net-generator.rules') fi LVM_CONF=$(readlink -f '/etc/lvm/lvm.conf') UDEV_CONFIG=$(readlink -f "/etc/udev/udev.conf") MODIFIED_FILE_LIST="$XORG_CONF $MODIFIED_FILE_LIST $LVM_CONF $MKINITRD $MKINITRD_CONFIG $MKINITRD_SETUPUDEV $MENULIST $MENULIST_CONFIG $LVM_CONF $MODPROBE_CONFIG $NET_GENERATOR_RULES $MODPROBE_UNSUPPORTED_MODULES" ### INSTALLED_FILE_LIST INSTALLED_FILE_LIST="${UVP_MONITOR_INIT} ${UVP_MONITOR} ${LIB_XENSTORE} ${MOUSECONFIG_INIT} \ $UVP_MODULES_PATH $UVP_MODULES_PATH_BACKUP $XEN_STORAGE_ID $XEN_STORAGE_RULES $MODPROBE_XEN_PVDRIVER $MKINITRD_XEN_PVDRIVER" ### uniform words INSTALLED_FILE_LIST=$(uniform_string "$INSTALLED_FILE_LIST") MODIFIED_FILE_LIST=$(uniform_string "$MODIFIED_FILE_LIST") return 0 } backup_all_files() { ### backup pvdriver directory if [ -d "$INSTALL_PATH" -a $CUR_INSTALL_LEVEL = 1 ] then if ! cp -afp "$INSTALL_PATH" "${INSTALL_PATH}.bak" then abort "backup pvdriver $INSTALL_PATH failed" fi fi ### recursive backup $Info "CUR_INSTALL_LEVEL=$CUR_INSTALL_LEVEL, BAK_INSTALL_LEVEL=$BAK_INSTALL_LEVEL" # restore higher levels if [ $BAK_INSTALL_LEVEL -gt $CUR_INSTALL_LEVEL ] then restore_all_files # already backup elif [ $BAK_INSTALL_LEVEL = $CUR_INSTALL_LEVEL ] then return $ERR_OK # backup lower levels elif [ $BAK_INSTALL_LEVEL -lt $((CUR_INSTALL_LEVEL-1)) ] then abort "inner error." fi ### backup all files BAK_INSTALL_LEVEL=$((BAK_INSTALL_LEVEL+1)) $Info "Backup all files level=${BAK_INSTALL_LEVEL}." mkdir -p "$INSTALL_PATH" echo "$BAK_INSTALL_LEVEL" > $BACKUP_LEVEL_FILE for file in $INSTALLED_FILE_LIST $MODIFIED_FILE_LIST do if [ -f "$file" ] then rm -f "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" if ! cp -fp "$file" "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" then abort "backup file $file failed" fi elif [ -d "$file" ] then rm -fr "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" if ! cp -afp "$file" "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" then abort "backup directory $file failed" fi fi done return 0 } restore_all_files() { ### restore installation directory if [ $CUR_INSTALL_LEVEL = 1 ] then if [ "$INSTALL_ACTION" = "install" ] then rm -fr "${INSTALL_PATH}" "${INSTALL_PATH}.bak" elif [ -d "${INSTALL_PATH}.bak" ] then rm -fr "$INSTALL_PATH" mv -f "${INSTALL_PATH}.bak" "${INSTALL_PATH}" fi fi ### recursive restore # already restore if [ $BAK_INSTALL_LEVEL -lt $CUR_INSTALL_LEVEL ] then return 0 # restore higher levels elif [ $BAK_INSTALL_LEVEL -gt $CUR_INSTALL_LEVEL ] then # restore deeper level if ! $UNINSTALLER_DIR/$UNINSTALLER -r then warn "restore upper backup failed, backup level $CUR_INSTALL_LEVEL-$BAK_INSTALL_LEVEL is discarded." 
fi BAK_INSTALL_LEVEL=$CUR_INSTALL_LEVEL fi ### restore all files for file in $INSTALLED_FILE_LIST do if [ "$INSTALL_ACTION" = "install" -a -e "$UVP_MODULES_PATH_BACKUP" ] then if [ "$file" = "$UVP_MODULES_PATH_BACKUP" ] then tar -xPf "$UVP_MODULES_PATH_BACKUP" fi rm -fr "$file" elif [ -e "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" ] then rm -fr "$file" mv -f "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" "$file" if [ "$file" = "$UVP_MODULES_PATH_BACKUP" ] then tar -tPf "$file" | xargs rm -fr fi fi done for file in $MODIFIED_FILE_LIST do if [ -e "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" ] then rm -fr "$file" mv -f "$(dirname $file)/.$(basename $file).$BAK_INSTALL_LEVEL.bak" "$file" fi done $Info "Restore all files level=${BAK_INSTALL_LEVEL}." BAK_INSTALL_LEVEL=$((BAK_INSTALL_LEVEL-1)) echo "$BAK_INSTALL_LEVEL" > $BACKUP_LEVEL_FILE ### decrease level if [ $CUR_INSTALL_LEVEL = 1 ] then if [ "$INSTALL_ACTION" = "install" ] then [ "$AUTO_UPGRADE" != 'true' ] && kill_proc "${UVP_MONITOR}" else if ! $UNINSTALLER_DIR/$UNINSTALLER -s then echo "restart service for pvdriver failed, please start it by yourself" fi fi rm -f $BACKUP_LEVEL_FILE else echo "$BAK_INSTALL_LEVEL" > $BACKUP_LEVEL_FILE fi return 0 } clean_all_files() { ### check level if [ $BAK_INSTALL_LEVEL -lt $CUR_INSTALL_LEVEL ] then return 0 fi ### iterative restore all files for ((level=3; level>=$CUR_INSTALL_LEVEL; level--)) do for file in $INSTALLED_FILE_LIST $MODIFIED_FILE_LIST do rm -fr "$(dirname $file)/.$(basename $file).$level.bak" done done $Info "Clean all files level=$CUR_INSTALL_LEVEL$([ $CUR_INSTALL_LEVEL -lt $BAK_INSTALL_LEVEL ] && echo -n -${BAK_INSTALL_LEVEL})." BAK_INSTALL_LEVEL=$((CUR_INSTALL_LEVEL-1)) echo "$BAK_INSTALL_LEVEL" > $BACKUP_LEVEL_FILE ### decrease level if [ $CUR_INSTALL_LEVEL = 1 ] then rm -f $BACKUP_LEVEL_FILE else echo "$BAK_INSTALL_LEVEL" > $BACKUP_LEVEL_FILE fi ### cleanup installation directory if [ $CUR_INSTALL_LEVEL = 1 ] then rm -fr "${INSTALL_PATH}.bak" fi return 0 } ############################################################################### # Exception Process Interface ############################################################################### # prompt installation warn() { $Info "call warn: $1" local message=$1 if [ -n "$message" ] then echo 1>&2 "warning: $message" fi return $ERR_OK } # abort installation abort() { trap "" INT TERM QUIT HUP $Info "call abort: $1" local message=$1 if [ -n "$message" ] then echo 1>&2 "error: $message" fi if restore_all_files then $Info "Restore all files success." else $Info "Restore all files failed." fi if [ $CUR_INSTALL_LEVEL = 1 ] then rmdir $INSTALL_PATH 1>/dev/null 2>&1 unlock_install fi rm -fr $WORKDIR sync exit $ERR_ABORT } # cancel signal process cancel() { trap "" INT TERM QUIT HUP $Info "call cancel: $1" if restore_all_files then $Info "Restore all files success." else $Info "Restore all files failed." fi if [ $CUR_INSTALL_LEVEL = 1 ] then rmdir $INSTALL_PATH 1>/dev/null 2>&1 unlock_install echo echo "Operation canceled." 
fi rm -fr $WORKDIR sync exit $ERR_CANCEL } ############################################################################### # PV ops kernels emit gratuitous ARPs on migrate when this is set ############################################################################### GetArpNotifySet() { # PV ops kernels do not emit gratuitous ARPs on migrate unless this is set sys_file=/proc/sys/net/ipv4/conf/all/arp_notify if [ -f ${sys_file} ] then return 0 fi return 1 } EnableArpNotifySetting() { sysctl_file=/etc/sysctl.conf if GetArpNotifySet then arp_notify_val=1 else arp_notify_val=0 fi if [ "${arp_notify_val}" -gt 0 ] ; then echo " enable arp_notify setting." >&2 else return 1 fi if [ ! -e "${sysctl_file}" ] ; then echo " Fail to enable arp_notify setting as no ${sysctl_file} present." >&2 return 1 fi ETHDEV=$(ls /proc/sys/net/ipv4/conf/) sed -i -e 's/\(^\s*net\.ipv4\.conf\.[^.]\+\.arp_notify\s*=\s*0\)/${MARKER_COMMENT}\n#\1/' ${sysctl_file} echo -e "${MARKER_BEGIN}${MARKER_WARNING}" >> ${sysctl_file} for dev in $ETHDEV do echo -e "net.ipv4.conf.${dev}.arp_notify = 1" >> ${sysctl_file} done echo -e "${MARKER_END}" >> ${sysctl_file} sysctl -p ${sysctl_file} >/dev/null 2>&1 return 0 } RestoreArpNotifySetting() { sysctl_file=/etc/sysctl.conf if [ ! -e "${sysctl_file}" ] ; then echo " Fail to restore arp_notify setting as no ${sysctl_file} present." >&2 return 1 fi sed -i '/###pvdriver/,/###pvdriver/d' ${sysctl_file} >/dev/null 2>&1 sysctl -p ${sysctl_file} >/dev/null 2>&1 rm -rf ${XENVNET_ARP} return 0; } ############################################################################### # install driver ############################################################################### #unzip initrd and change initrd InstallModules() { echo " install kernel modules." ### replace xen modules if [ ! -d "$LCL_UVP_MODULES_PATH" ] then abort "kernel $KERN_RELEASE is not supported" fi if [ "$kernel_param_flag" = "1" ] then mkdir -p "$UVP_MODULES_PATH" 2>/dev/null 1>/dev/null cp -Rf "$LCL_UVP_MODULES_PATH"/* "$UVP_MODULES_PATH" 2>/dev/null 1>/dev/null chmod -R 755 "$UVP_MODULES_PATH" 2>/dev/null 1>/dev/null else xen_mods_dir=$(dirname "$UVP_MODULES_PATH") mkdir -p "$xen_mods_dir" for item in $(find ${xen_mods_dir} -path "${xen_mods_dir}/.*" -prune -o -type f -name 'xen*.ko' -print | sed "s;\(${xen_mods_dir}/[^/]*\).*;\1;g" | sed '$!N; /^\(.*\)\n\1$/!P; D') do if ! (tar -rPf "$UVP_MODULES_PATH_BACKUP" "${item}" && rm -fr "${item}") then abort "backup kernel built-in modules ${item} failed." fi done rm -fr "${UVP_MODULES_PATH}" if ! (cp -Rf "$LCL_UVP_MODULES_PATH" "$UVP_MODULES_PATH" && chmod -R 755 "$UVP_MODULES_PATH") then abort "install $UVP_MODULES_PATH failed" fi fi if [ ! -f "${UVP_MODULES_PATH}/xen-balloon/xen-balloon.ko" ] then BALLOONMOD="#" fi if [ ! -f "${UVP_MODULES_PATH}/xen-hcall/xen-hcall.ko" ] then HCALLMOD="#" fi if [ ! -f "${UVP_MODULES_PATH}/xen-vmdq/xen-vmdq.ko" ] then VMDQMOD="#" fi if [ ! -f "${UVP_MODULES_PATH}/xen-scsi/xen-scsifront.ko" ] then SCSIMOD="#" fi chmod -R 544 $RC_SYSINIT >/dev/null 2>&1 sed -i "$ a\ ###pvdriver \n\ # Let load XENPV modules \n\ ${BALLOONMOD}modprobe xen-balloon >/dev/null 2>&1\n\ ${HCALLMOD}modprobe xen-hcall >/dev/null 2>&1\n\ ${VMDQMOD}modprobe xen-vmdq >/dev/null 2>&1\n\ ${SCSIMOD}modprobe xen-scsifront >/dev/null 2>&1\n\ ###pvdriver" $RC_SYSINIT >/dev/null 2>&1 } #unzip initrd.img and restore initrd UninstallModules() { echo " uninstall kernel modules." if [ "$kernel_param_flag" = "1" ] then true elif [ -f "$UVP_MODULES_PATH_BACKUP" ] then if ! 
(tar -xPf "$UVP_MODULES_PATH_BACKUP" && rm -fr "$UVP_MODULES_PATH_BACKUP") then abort "restore kernel built-in modules failed." fi fi rm -fr "$UVP_MODULES_PATH" sed -i '/###pvdriver/,/###pvdriver/d' $RC_SYSINIT >/dev/null 2>&1 } ############################################################################### ### service functions ############################################################################### # Install a init script add_init_script() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') if [ "$INIT_TYPE" = "sysv" ] then if ! (cp -f "$srv_script" "/etc/init.d/$srv_name" && chmod 755 "/etc/init.d/$srv_name") then abort "install /etc/init.d/$srv_name failed" fi else $Info "inner error: INIT_TYPE=$INIT_TYPE" return $ERR_NG fi return $ERR_OK } # Remove a init script remove_init_script() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') if [ "$INIT_TYPE" = "sysv" ] then rm -f "/etc/init.d/$srv_name" else $Info "inner error: INIT_TYPE=$INIT_TYPE" return $ERR_NG fi return $ERR_OK } # Add a service to a runlevel add_rc_config() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') # Redhat-based systems if [ "$SYS_TYPE" = "redhat" ] then chkconfig --del $srv_name > /dev/null 2>&1 if chkconfig -v > /dev/null 2>&1 then if ! chkconfig --level 35 $srv_name on then $Info "enable $srv_name failed." return $ERR_NG fi else if ! chkconfig $srv_name 35 then $Info "enable $srv_name failed." return $ERR_NG fi fi # Suse-based systems elif [ "$SYS_TYPE" = "suse" ] then insserv -r $srv_name > /dev/null 2>&1 if ! insserv $srv_name > /dev/null 2>&1 then $Info "enable $srv_name failed." return $ERR_NG fi # Debian-based systems elif [ "$SYS_TYPE" = "debian" ] then # Debian does not support dependencies currently -- $2: start sequence number and argument, $3: stop sequence number update-rc.d -f $srv_name remove > /dev/null 2>&1 if ! update-rc.d $srv_name defaults $2 $3 > /dev/null 2>&1 then $Info "enable $srv_name failed." return $ERR_NG fi elif [ "$SYS_TYPE" = "gentoo" ] then # Debian does not support dependencies currently -- $2: start sequence number and argument, $3: stop sequence number rc-update del $srv_name default > /dev/null 2>&1 if ! rc-update add $srv_name default > /dev/null 2>&1 then $Info "enable $srv_name failed." return $ERR_NG fi else $Info "inner error: INIT_TYPE=$INIT_TYPE" return $ERR_NG fi return $ERR_OK } # Delete a service from a runlevel remove_rc_config() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') # Redhat-based systems if [ "$SYS_TYPE" = "redhat" ] then if ! chkconfig --del $srv_name > /dev/null 2>&1 then $Info "disable $srv_name failed." warn "disable $srv_name failed." fi # Suse-based systems elif [ "$SYS_TYPE" = "suse" ] then if ! insserv -r $srv_name > /dev/null 2>&1 then $Info "disable $srv_name failed." warn "disable $srv_name failed." fi # Debian-based systems elif [ "$SYS_TYPE" = "debian" ] then if ! update-rc.d -f $srv_name remove > /dev/null 2>&1 then $Info "disable $srv_name failed." warn "disable $srv_name failed." fi elif [ "$SYS_TYPE" = "gentoo" ] then if ! rc-update del $srv_name default > /dev/null 2>&1 then $Info "disable $srv_name failed." warn "disable $srv_name failed." 
fi else $Info "inner error: INIT_TYPE=$INIT_TYPE" return $ERR_NG fi return $ERR_OK } # Start the init script start_init_script() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') if [ "$INIT_TYPE" = "sysv" ] then if [ -x "/etc/init.d/$srv_name" ] then /etc/init.d/$srv_name start >/dev/null return $? else return 2 fi else $Info "inner error: INIT_TYPE=$INIT_TYPE" return 1 fi } # Stop the init script stop_init_script() { local srv_script=$1 local srv_name=$(basename "${srv_script}" | sed 's/\.init$//g') if [ "$INIT_TYPE" = "sysv" ] then if [ -x "/etc/init.d/$srv_name" ] then /etc/init.d/$srv_name stop >/dev/null return $? else return 2 fi else $Info "inner error: INIT_TYPE=$INIT_TYPE" return 1 fi } ############################################################################### # install GuestOS Support Feature File ############################################################################### InstallFeatureFile() { echo " install GuestOS Support Feature File." if ! (cp -f "${LCL_UVP_CURRENTOS}" "${UVP_CURRENTOS}" && chmod 640 "${UVP_CURRENTOS}") then $Info "install ${UVP_CURRENTOS} failed" abort "install ${UVP_CURRENTOS} failed" fi if ! (cp -f "${LCL_UVP_FEATURE}" "${UVP_FEATURE}" && chmod 640 "${UVP_FEATURE}") then $Info "install ${UVP_FEATURE} failed" abort "install ${UVP_FEATURE} failed" fi } ############################################################################### # install monitor ############################################################################### InstallServices() { echo " install uvp-monitor service." if ! (cp -f "${LCL_LIB_XENSTORE}" "${LIB_XENSTORE}" && chmod 755 "${LIB_XENSTORE}") then $Info "install ${LIB_XENSTORE} failed" abort "install ${LIB_XENSTORE} failed" fi if ! (cp -f "${LCL_UVP_MONITOR}" "${UVP_MONITOR}" && chmod 755 "${UVP_MONITOR}") then $Info "install ${UVP_MONITOR} failed" abort "install ${UVP_MONITOR} failed" fi if ! (cp -f "${LCL_UVP_KERUP}" "${UVP_KERUP}" && chmod 755 "${UVP_KERUP}") then $Info "install ${UVP_KERUP} failed" abort "install ${UVP_KERUP} failed" fi if ! (cp -f "${LCL_UVP_MEMONLINE}" "${UVP_MEMONLINE}" && chmod 544 "${UVP_MEMONLINE}") then $Info "install ${UVP_MEMONLINE} failed" abort "install ${UVP_MEMONLINE} failed" fi if ! ( cp -f "${LCL_UVP_SWAP}" "${UVP_SWAP}" && chmod 644 "${UVP_SWAP}") then $Info "install ${UVP_SWAP} failed" abort "install ${UVP_SWAP} failed" fi if ! add_init_script "${LCL_UVP_MONITOR_INIT}" then $Info "add_init_script ${LCL_UVP_MONITOR_INIT} failed" abort "add_init_script ${LCL_UVP_MONITOR_INIT} failed" fi if ! add_rc_config "${UVP_MONITOR_INIT}" then $Info "setup ${UVP_MONITOR_INIT} failed" abort "setup ${UVP_MONITOR_INIT} failed" fi [ "$AUTO_UPGRADE" != 'true' ] && start_init_script "${UVP_MONITOR_INIT}" if ! cat > "$INSTALL_PATH/version.ini" << EOF pvdriverVersion=${INSTALL_VERSION} DriverVersion=${DRIVER_VERSION} kernelVersion="${KERN_RELEASE}" UserDriVer=${USERDRI_VERSION} EOF then abort "install $INSTALL_PATH/version.ini failed" fi } UninstallFeatureFile() { echo " uninstall GuestOS Support Feature File." rm -f ${UVP_FEATURE} ${UVP_CURRENTOS} } UninstallServices() { echo " uninstall uvp-monitor service." [ "$AUTO_UPGRADE" != 'true' ] && stop_init_script "${UVP_MONITOR_INIT}" ### uninstall monitor service if ! remove_rc_config "${UVP_MONITOR_INIT}" then $Info "remove_rc_config ${UVP_MONITOR_INIT} failed" abort "remove_rc_config ${UVP_MONITOR_INIT} failed" fi if ! 
remove_init_script "${UVP_MONITOR_INIT}" then $Info "remove_init_script ${UVP_MONITOR_INIT} failed" abort "remove_init_script ${UVP_MONITOR_INIT} failed" fi rm -f "${LIB_XENSTORE}" "${UVP_MONITOR}" ${UVP_NDSEND} ${UVP_ARPING} "${UVP_KERUP}" "${UVP_MEMONLINE}" "${UVP_SWAP}" } ############################################################################### # change linux configuration ############################################################################### # output emu_bus lvm_blkdev_type check_disk_driver() { ### add udev storage rules #depmod for mod in ide-core ide-disk scsi-mod sd-mod do modprobe $mod >/dev/null 2>&1 done if [ -n "`uname -r | grep 2.6.29-bigsmp`" ] then emu_bus='ata' elif [ -n "`uname -r | grep 2.6.23.1-42.fc8`" ] then emu_bus='scsi' elif [ -d '/sys/bus/scsi/drivers/sd' -a -f '/sys/bus/scsi/drivers/sd/uevent' -a -z "`uname -r | grep 2.6.26-2`" ] then emu_bus='scsi' elif [ -d '/sys/bus/ide/drivers/ide-disk' ] then emu_bus='ata' else abort "can not determine your block device driver." fi } change_mkinitrd() { local emu_bus local lvm_blkdev_type ### cleanup for config_file in "$MKINITRD" "$MKINITRD_CONFIG" "$MKINITRD_SETUPUDEV" "$UDEV_CONF" do remove_block "$config_file" done ### get modules list if [ -d "$UVP_MODULES_PATH" ] then pv_mods='' for mod in $(find "$UVP_MODULES_PATH" -type f -name "*.ko") do mod="${mod##*/}" ; mod="${mod%%.ko}" pv_mods="$pv_mods $mod" done else pv_mods=' xen-platform-pci xen-vbd xen-vnif xen-balloon' fi # udev rules for storage if ! (cp -f "${LCL_XEN_STORAGE_ID}" "$XEN_STORAGE_ID" && chmod -R 755 "$XEN_STORAGE_ID") then abort "install $XEN_STORAGE_ID failed" fi if ! (cp -f "${LCL_XEN_STORAGE_RULES}" "${XEN_STORAGE_RULES}" && chmod -R 644 "${XEN_STORAGE_RULES}") then abort "install $XEN_STORAGE_RULES failed" fi check_disk_driver sed -i "s;%XEN_ID%;${XEN_STORAGE_ID} -b $emu_bus;g" "$XEN_STORAGE_RULES" sed -i "s;%XEN_DRIVERS%;${pv_mods};g" "$XEN_STORAGE_RULES" modprobe_conf=$MODPROBE_XEN_PVDRIVER if [ -n "$modprobe_conf" ] then touch "$MODPROBE_XEN_PVDRIVER" elif [ -f "$MODPROBE_CONFIG" ] then modprobe_conf=$MODPROBE_CONFIG else abort "config modprobe failed." 
fi insert_block "$modprobe_conf" "$" "\ alias 8139cp off install 8139cp : alias 8139too off install 8139too :" ### modify mkinitrd if [ -f "/etc/initramfs-tools/modules" ] then ### anchor_expreg=$(sed -n '/^\s*\([^#].*\|$\)/=' "$MKINITRD_CONFIG" | sed -n '1p') if [ -z "$anchor_expreg" ] then anchor_expreg='$' else anchor_expreg=$((anchor_expreg-1)) fi insert_block "$MKINITRD_CONFIG" "$anchor_expreg" "$(echo "${pv_mods}" | sed 's/\s\+/\n/g')" insert_block "$MKINITRD_SETUPUDEV" "$" "\ copy_exec ${XEN_STORAGE_ID} /lib/udev cp -p ${XEN_STORAGE_RULES} \${DESTDIR}/etc/udev/rules.d" elif [ "$(which_binary dracut)" != 'none_binary' ] then insert_block "$MKINITRD_CONFIG" "$" "add_drivers=\"${pv_mods} \$add_drivers\"" insert_block "$MKINITRD_SETUPUDEV" "$" "inst_rules \"${XEN_STORAGE_RULES}\"" insert_block "$MKINITRD_SETUPUDEV" "$" "dracut_install \"${XEN_STORAGE_ID}\"" elif [ "$(which_binary mkinitrd)" != 'none_binary' ] then if [ -n "$(mkinitrd --help 2>&1 | sed -n '/--with=/p')" ] then ## if [ -n "$MKINITRD_XEN_PVDRIVER" ] then touch "$MKINITRD_XEN_PVDRIVER" && chmod 755 "$MKINITRD_XEN_PVDRIVER" insert_block "$MKINITRD_XEN_PVDRIVER" "$" "PREMODS=\"${pv_mods} \${PREMODS}\"" elif [ -n "${MKINITRD_CONFIG}" ] then insert_block "$MKINITRD_CONFIG" "$" "PREMODS=\"${pv_mods} \${PREMODS}\"" else basic_mods=$(echo "${pv_mods}" | sed 's/\s\+/ --preload=/g') insert_block "$MKINITRD" "1" "\ [ -z \"\$uvp_pvdriver_mkinitrd_level\" ] && export uvp_pvdriver_mkinitrd_level=0 uvp_pvdriver_mkinitrd_level=\$((uvp_pvdriver_mkinitrd_level + 1)) if [ \$uvp_pvdriver_mkinitrd_level = 1 ] then mkinitrd ${basic_mods} \$@ exit \$? fi" ##syntax highlight" fi if [ -z "$(mkinitrd --help 2>&1 | sed -n '/--help/p')" ] then if ! (cp -f "${LCL_XEN_STORAGE_RULES}.legacy" "$XEN_STORAGE_RULES" && chmod -R 644 "$XEN_STORAGE_RULES") then abort "install ${XEN_STORAGE_RULES} failed" fi fi else ### modules insert_block "${MKINITRD_CONFIG}" "$" "INITRD_MODULES=\"${pv_mods} \${INITRD_MODULES}\"" ### udev rules if [ -f "$MKINITRD_SETUPUDEV" ] then insert_block "$MKINITRD_SETUPUDEV" "$" "cp ${XEN_STORAGE_RULES} \$tmp_mnt/${XEN_STORAGE_RULES}" else insert_word "$MKINITRD" "ata_id" "$(basename $XEN_STORAGE_ID)" insert_word "$MKINITRD" "60-persistent-storage.rules" "$(basename $XEN_STORAGE_RULES)" fi fi else return $ERR_NG fi return $ERR_OK } restore_mkinitrd() { ### get modules list hv_mods=" sd_mod ata_piix" if [ -n "`uname -r | grep 2.6.18-SKL1.9.4.ky3.173.4.1`" ]; then hv_mods=" sd_mod" fi if [ -n "`uname -r | grep 2.6.29-bigsmp`" ]; then hv_mods="" fi ### cleanup # remove installed files rm -f "$XEN_STORAGE_ID" "$XEN_STORAGE_RULES" "$MODPROBE_XEN_PVDRIVER" # remove inserted blocks for config_file in "$MKINITRD" "$MKINITRD_CONFIG" "$MKINITRD_SETUPUDEV" "$MODPROBE_CONFIG" "$UDEV_CONF" do remove_block "$config_file" done ### if [ -f "/etc/initramfs-tools/modules" ] then insert_block "$MKINITRD_CONFIG" "$" "$(echo "${hv_mods}" | sed 's/\s\+/\n/g')" elif [ "$(which_binary dracut)" != 'none_binary' ] then insert_block "$MKINITRD_CONFIG" "$" "add_drivers=\"${hv_mods} \$add_drivers\"" elif [ "$(which_binary mkinitrd)" != 'none_binary' ] then if [ -n "$(mkinitrd --help 2>&1 | sed -n '/--with=/p')" ] then ### modules if [ -n "$MKINITRD_XEN_PVDRIVER" ] then : > "$MKINITRD_XEN_PVDRIVER" && chmod 755 "$MKINITRD_XEN_PVDRIVER" insert_block "$MKINITRD_XEN_PVDRIVER" "$" "basicmodules=\"${hv_mods} \${basicmodules}\"" elif [ -n "${MKINITRD_CONFIG}" ] then insert_block "$MKINITRD_CONFIG" "$" "basicmodules=\"${hv_mods} \${basicmodules}\"" else basic_mods=$(echo 
"${hv_mods}" | sed 's/\s\+/ --with=/g') insert_block "$MKINITRD" "1" "\ [ -z \"\$uvp_pvdriver_mkinitrd_level\" ] && export uvp_pvdriver_mkinitrd_level=0 uvp_pvdriver_mkinitrd_level=\$((uvp_pvdriver_mkinitrd_level + 1)) if [ \$uvp_pvdriver_mkinitrd_level = 1 ] then mkinitrd ${basic_mods} \$@ exit \$? fi" ##syntax highlight" fi else ### modules insert_block "${MKINITRD_CONFIG}" "$" "INITRD_MODULES=\"${hv_mods} \${INITRD_MODULES}\"" ### udev rules if [ -f "$MKINITRD_SETUPUDEV" ] then remove_block "$MKINITRD_SETUPUDEV" "$" "cp ${XEN_STORAGE_RULES} $tmp_mnt/${XEN_STORAGE_RULES}" else remove_word "$MKINITRD" "ata_id" "$(basename $XEN_STORAGE_ID)" remove_word "$MKINITRD" "60-persistent-storage.rules" "$(basename $XEN_STORAGE_RULES)" fi fi else return $ERR_NG fi return $ERR_OK } ## export these information of a boot entry: ## Import Variables: ## $DESCRIPTION # # Export Information Variables: # $entry_num # $begin_line # $title_line # $end_line # $root_line # $kernel_line # $initrd_line # $kernel_release # $root_file # $kernel_file # $initrd_file info_boot_entry() { local menu_lst=$1 entry_num=$2 local var_list="menu_default menu_start begin_line end_line title_line root_line kernel_line initrd_line" local comment="init" ### set default parameter if [ ! -f "$menu_lst" ] then $Info "can not find $menu_lst." return $ERR_FILE_NOEXIST fi menu_default=$(sed -n '/^\s*default\s*=\{0,1\}\s*[0-9]\+\s*$/=' $menu_lst | sed -n '$p') if [ -z "$entry_num" ] then if [ -n "$menu_default" ] then entry_num=$(($(sed -n "${menu_default}p" $menu_lst | sed "s/^\s*default\s*=\{0,1\}\s*\([0-9]\+\)\s*$/\1/g")+1)) else $Info "can not find default boot entry in $menu_lst." return $ERR_NG fi elif [ $entry_num -le 0 ] then return 1 fi ### initialize all variables for var in $var_list do unset $var done menu_start=$(sed -n '/^\s*title\s\+/=' "${menu_lst}" | sed -n '1p') if [ -z "$menu_start" ] then menu_start=$(sed -n '$=' $menu_lst) return 0 fi ## process grub entry top # get [title_line] of grub entry title_line=$(grep -n "^[[:space:]]*title[[:space:]]\+" $menu_lst | sed -n "${entry_num}p" | awk -F ":" '{print $1}') if [ -z "$title_line" ] then $Info "can not find boot entry ${entry_num} in $menu_lst." 
return $ERR_NG fi # get [begin_line] of grub entry comment=$(sed -n "$(($title_line-1))p" $menu_lst | grep "^[[:space:]]*###.*YaST.*identifier:.*linux[[:space:]]*###[[:space:]]*$") if [ -n "$comment" ] # include grub comment above then begin_line=$((title_line-1)) else begin_line=$title_line fi ## process grub entry bottom # get [end_line] of grub entry end_line=$(grep -n "^[[:space:]]*title[[:space:]]\+" $menu_lst | sed -n "$(($entry_num+1))p" | awk -F ":" '{print $1}') [ -z "$end_line" ] && end_line=$(sed -n '$=' "$menu_lst") while [ -n "$(sed -n ${end_line}s/\(^\s*#.*\)/\1/p $menu_lst)" ] # omit grub comment below do end_line=$((end_line-1)) done ## get other line of grub entry for ((line_num=$begin_line; line_num<=$end_line; line_num++)) do line=$(sed -n "${line_num}p" $menu_lst) if [ -n "$(echo -n "$line" | grep "^[[:space:]]*root[[:space:]]\+")" ] ; then root_line=$line_num ; fi if [ -n "$(echo -n "$line" | grep "^[[:space:]]*kernel[[:space:]]\+")" ] ; then kernel_line=$line_num ; fi if [ -n "$(echo -n "$line" | grep "^[[:space:]]*initrd[[:space:]]\+")" ] ; then initrd_line=$line_num ; fi done ### return susess return 0 } info_boot_entry_uefi() { local menu_lst=$1 entry_num=$2 local comment="init" local var_list="menu_default menu_start begin_line end_line root_line kernel_line initrd_line" ### set default parameter if [ ! -f "$menu_lst" ] then $Info "can not find $menu_lst." return $ERR_FILE_NOEXIST fi menu_default=$(sed -n '/^\s*default\s*=\{0,1\}\s*\+\S\+\s*$/=' $menu_lst | sed -n '$p') if [ -n "$menu_default" ] then entry_num=$(sed -n "${menu_default}p" $menu_lst | sed "s/^\s*default\s*=\{0,1\}\s*\(\S\+\)\s*$/\1/g") else $Info "can not find default boot entry in $menu_lst." return $ERR_NG fi ### initialize all variables for var in $var_list do unset $var done menu_start=$(sed -n '/^\s*label\s\+/=' "${menu_lst}" | sed -n '1p') if [ -z "$menu_start" ] then menu_start=$(sed -n '$=' $menu_lst) return 0 fi label_line=$(grep -n "^[[:space:]]*label[[:space:]]\+" $menu_lst | awk -F ":" '{print $1}') for line in $label_line do if [ -n "$begin_line" ] then end_line=$line break fi lable_val=$(sed -n "${line}p" $menu_lst | sed "s/^\s*label\s*=\{0,1\}\s*\(\S\+\)\s*$/\1/g") if [ "$lable_val" = "$entry_num" ] then begin_line=$line fi done while [ -z "$(sed -n ${begin_line}s/\^[[:space:]]*$/\1/p $menu_lst)" ] do begin_line=$((begin_line-1)) done if [ -z "$end_line" ] then end_line=$(sed -n '$=' "$menu_lst") else while [ -z "$(sed -n ${end_line}s/\^[[:space:]]*$/\1/p $menu_lst)" ] do end_line=$((end_line-1)) done fi ## get other line of grub entry for ((line_num=$begin_line; line_num<=$end_line; line_num++)) do line=$(sed -n "${line_num}p" $menu_lst) if [ -n "$(echo -n "$line" | grep "^[[:space:]]*root[[:space:]]\+")" ] ; then root_line=$line_num ; fi if [ -n "$(echo -n "$line" | grep "^[[:space:]]*append[[:space:]]\+")" ] ; then kernel_line=$line_num ; fi if [ -n "$(echo -n "$line" | grep "^[[:space:]]*initrd[[:space:]]\+")" ] ; then initrd_line=$line_num ; fi done return 0 } GRUB_ENTRY_TITLE="Linux ${KERN_RELEASE} with PV Driver" add_kernel_param() { check_disk_driver ide_to_xen_flag="" if [ -n "`uname -r | grep 2.6.26-2`" ] then ide_to_xen_flag="yes" fi if [ -n "$MENULIST_CONFIG" ] then cmdline="" elif [ "yes" == "$UEFI_FLAG" ] then info_boot_entry_uefi "$MENULIST" "" if [ -n "${kernel_line}" ] then cmdline=$(sed -n "${kernel_line}p" "$MENULIST" | sed -n 's/^\s*append\s\+\S\+\s*\(.*\)/\1/p') else cmdline=$(cat /proc/cmdline) fi else info_boot_entry "$MENULIST" "" #仅支持默认启动项 if [ -n 
"${kernel_line}" ] then cmdline=$(sed -n "${kernel_line}p" "$MENULIST" | sed -n 's/^\s*kernel\s\+\S\+\s*\(.*\)/\1/p') else cmdline=$(cat /proc/cmdline) fi fi ## check kernel parameter in $MENULIST if [ -f "$LVM_CONF" ] then # scan vg on /dev/xvdx [ -n "$(sed -n '/^\s*types\s*=/p' "$LVM_CONF")" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*types\s*=.*' [ -n "$(sed -n '/^\s*filter\s*=/p' "$LVM_CONF")" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*filter\s*=.*' devices_line=$(sed -n '/^\s*devices\s*{/=' "$LVM_CONF") if [ -z "$devices_line" ] then devices_line=$(sed -n '/^\s*devices\s*/=' "$LVM_CONF") if [ -z "$devices_line" ] then abort "can not find devices information in $LVM_CONF." fi total_line=$(sed -n '$=' "$LVM_CONF") while [ -z "$(sed -n "${devices_line}s/^\s*\({\)/\1/p" $LVM_CONF)" -a ${devices_line} -le ${total_line} ] do devices_line=$((devices_line+1)) done if [ ${devices_line} -gt ${total_line} ] then abort "incorrect syntax in $LVM_CONF." fi fi insert_block "$LVM_CONF" "${devices_line}" "\ types = [ \"xvd\" , 16 ] filter = [ \"a|.*|\" ]" #syntax highlight" if ! $(which_binary 'vgscan') >/dev/null 2>&1 then abort "reset volume groups failed." fi comment_off_line "$LVM_CONF" ; remove_block "$LVM_CONF" fi # check values of root and resume for device in root resume do device_value=$(echo "$cmdline" | sed -n "s/.*${device}=\(\S*\).*/\1/p" | sed 's/^\(LABEL=\|UUID=\).*//g') # redhat support label, uuid or lvm now if [ "$SYS_TYPE" = 'redhat' -a -n "$device_value" ] && ! $(which_binary lvdisplay) -c $device_value >/dev/null 2>&1 then ide_to_xen_flag="yes" fi # support [hs]d,by-id,by-uuid,label,uuid,lvm if [ -z "$device_value" ] \ || [ -z "$(echo $device_value | sed 's;^\s*/dev/\(\([hs]\|xv\)d\|disk/\(by-id/\(ata\|scsi\)-\|by-uuid/\)\).*;;g')" ] \ || $(which_binary lvdisplay) -c "$device_value" >/dev/null 2>&1 then continue else warn "$device=$device_value specified in $MENULIST is not supported." warn "please replace it with [hs]d, by-id, by-uuid, label, uuid or lvm." abort "check kernel parameter failed." fi done if [ $SYS_TYPE == "suse" -o $SYS_TYPE == "redhat" ] then nr_ide_dev=1 else nr_ide_dev=2 fi if [ $(compare_version "$KERN_RELEASE" "2.6.25" ; echo $?) = 1 ] then for ((ide_num=0; ide_num<$nr_ide_dev; ide_num++)) do if [ -z "$(echo "${cmdline}" | sed -n "/ide${ide_num}=noprobe/p")" ] then cmdline=$(echo "$cmdline" | sed "s;\(^\|.*\)\(\"\|$\);\1 ide${ide_num}=noprobe\2;g") fi done if [ "${nr_ide_dev}" == "1" ] then if [ -z "$(echo "${cmdline}" | sed -n "/hdc=noprobe/p")" ] then cmdline=$(echo "$cmdline" | sed "s;\(^\|.*\)\(\"\|$\);\1 hdc=noprobe\2;g") fi fi elif [ $(compare_version "$KERN_RELEASE" "3.0" ; echo $?) = 2 ] then if [ -z "$(echo "${cmdline}" | sed -n '/xen_emul_unplug=all/p')" ] then cmdline=$(echo "$cmdline" | sed "s;\(^\|.*\)\(\"\|$\);\1 xen_emul_unplug=all\2;g") fi else for ((ide_num=0; ide_num<$nr_ide_dev; ide_num++)) do if [ -z "$(echo "${cmdline}" | sed -n "/ide_core\.noprobe=0\.${ide_num}/p")" ] then cmdline=$(echo "$cmdline" | sed "s;\(^\|.*\)\(\"\|$\);\1 ide_core.noprobe=0.${ide_num}\2;g") fi done fi # Ubuntu 10.04/10.10 & Fedora 12 are unsupported USB Storage Devices. UN_USBSTG_OS="$(echo "$LCL_UVP_MODULES_PATH" | awk -F'/' '{print $NF}' | awk -F'-' '{print $NF}')" # RHEL/CentOS 6.0 are unsupported remote ISO9660 Devices. UN_CDROM_OS="$(uname -r | awk -F'.' 
'{print $3}')" if [ "Ubuntu10" == "$UN_USBSTG_OS" ] || [ "FEDORA12" == "$UN_USBSTG_OS" ] || [ "FEDORA14" == "$UN_USBSTG_OS" ] || [ "32-71" == "$UN_CDROM_OS" ] || [ -n "`uname -r | grep '2.6.31-14'`" ] then SCSI_MOD_PARAM="yes" fi if [ "yes" == "$SCSI_MOD_PARAM" ] then if [ "$emu_bus" = 'scsi' -a -z "$(echo "${cmdline}" | sed -n '/scsi_mod.scan=none/p')" ] then cmdline=$(echo "$cmdline" | sed "s;\(^\|.*\)\(\"\|$\);\1 scsi_mod.scan=none\2;g") fi fi if [ "yes" == "$ide_to_xen_flag" ] && [ -n "echo ${cmdline} | grep '=/dev/hd'" ] then cmdline=$(echo "$cmdline" | sed "s;=/dev/hd;=/dev/xvd;g") fi cmdline=$(echo "$cmdline" | sed "s/\s\{2,\}/ /g") if [ -n "$MENULIST_CONFIG" ] then remove_block "$MENULIST_CONFIG" insert_block "$MENULIST_CONFIG" '$' "GRUB_CMDLINE_LINUX_DEFAULT=\"\$GRUB_CMDLINE_LINUX_DEFAULT $cmdline\"" #change the default value if [ -n "cat /etc/issue 2>/dev/null | grep Ubuntu" -a -f "$MENULIST_CONFIG" -a -f "$MENULIST" ]; then sed -i "s/$(cat $MENULIST_CONFIG | grep GRUB_DEFAULT)/GRUB_DEFAULT=$(cat $MENULIST | grep -w "set default=" | awk -F"\"" 'NR==1{print $2}')/g" $MENULIST_CONFIG fi if [ -n "`cat /etc/issue | grep "Canaima"`" ] then if ! $(which_binary burg-mkconfig) -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi elif [ -n "`echo "${KERN_RELEASE}" | grep "3.16.6-2\|4.2.3-300\|4.0.4-301"`" ] then if ! $(which_binary grub2-mkconfig) -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi else if ! $(which_binary grub-mkconfig) -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi fi elif [ "yes" == "$UEFI_FLAG" ] then info_boot_entry_uefi "$MENULIST" "" if [ -n "${kernel_line}" ] then replace_word "$MENULIST" "${kernel_line}" '^\(\s*append\s\+\S\+\s*\).*' "\1${cmdline}" /sbin/elilo else abort "update kernel parameters failed." fi else # Make sure there is an entry for current kernel info_boot_entry "$MENULIST" "" #仅支持默认启动项 if [ -n "${kernel_line}" ] then replace_word "$MENULIST" "${kernel_line}" '^\(\s*kernel\s\+\S\+\s*\).*' "\1${cmdline}" elif [ "$(which_binary grubby)" != 'none_binary' ] then for kernel in /boot/vmlinu*-${KERN_RELEASE} do grubby --add-kernel="${kernel}" --args="${cmdline}" --initrd="${INITRD_FILE}" --title="${GRUB_ENTRY_TITLE}" \ --grub --make-default done else abort "update kernel parameters failed." fi fi return $ERR_OK } remove_kernel_param() { cp -f "${MENULIST}" "${MENULIST}.bak" check_disk_driver ide_to_xen_flag="" if [ -n "`uname -r | grep 2.6.26-2`" ] then ide_to_xen_flag="yes" fi if [ -n "$MENULIST_CONFIG" ] then cmdline="" elif [ "yes" == "$UEFI_FLAG" ] then info_boot_entry_uefi "$MENULIST" "" if [ -n "${kernel_line}" ] then cmdline=$(sed -n "${kernel_line}p" "$MENULIST" | sed -n 's/^\s*append\s\+\S\+\s*\(.*\)/\1/p') else cmdline=$(cat /proc/cmdline) fi else info_boot_entry "$MENULIST" "" #仅支持默认启动项 if [ -n "${kernel_line}" ] then cmdline=$(sed -n "${kernel_line}p" "$MENULIST" | sed -n 's/^\s*kernel\s\+\S\+\s*\(.*\)/\1/p') else cmdline=$(cat /proc/cmdline) fi fi ## check kernel parameter in $MENULIST $(which_binary vgscan) >/dev/null 2>&1 for device in root resume do device_value=$(echo "$cmdline" | sed -n "s/.*${device}=\(\S*\).*/\1/p" | sed 's/^\(LABEL=\|UUID=\).*//g') # redhat support label, uuid or lvm now if [ "$SYS_TYPE" = 'redhat' -a -n "$device_value" ] && ! 
$(which_binary lvdisplay) -c $device_value >/dev/null 2>&1 then ide_to_xen_flag="yes" continue fi # support [hs]d,by-id,by-uuid,label,uuid,lvm if [ -z "$device_value" ] \ || [ -z "$(echo $device_value | sed 's;^\s*/dev/\(\([hs]\|xv\)d\|disk/\(by-id/\(ata\|scsi\)-\|by-uuid/\)\).*;;g')" ] \ || $(which_binary lvdisplay) -c "$device_value" >/dev/null 2>&1 then continue else warn "$device=$device_value specified in $MENULIST is not supported." warn "please replace it with [hs]d, by-id, by-uuid, label, uuid or lvm." fi done for ((ide_num=0; ide_num<1; ide_num++)) do cmdline=$(echo "$cmdline" | sed "s;\s*ide${ide_num}=noprobe\s*; ;g") done cmdline=$(echo "$cmdline" | sed "s;\s*hdc=noprobe\s*; ;g") cmdline=$(echo "$cmdline" | sed "s;\s*xen_emul_unplug=all\s*; ;g") for ((ide_num=0; ide_num<2; ide_num++)) do cmdline=$(echo "$cmdline" | sed "s;\s*ide_core\.noprobe=0\.${ide_num}\s*; ;g") done cmdline=$(echo "$cmdline" | sed "s;\s*scsi_mod.scan=none\s*; ;g") cmdline=$(echo "$cmdline" | sed "s/\s\{2,\}/ /g") if [ "yes" == "$ide_to_xen_flag" ] && [ -n "echo ${cmdline} | grep '=/dev/xvd'" ] then cmdline=$(echo "$cmdline" | sed "s;=/dev/xvd;=/dev/hd;g") fi if [ -n "$MENULIST_CONFIG" ] then remove_block "$MENULIST_CONFIG" #change the default value if [ -n "cat /etc/issue 2>/dev/null | grep Ubuntu" -a -f "$MENULIST_CONFIG" -a -f "$MENULIST" ]; then sed -i "s/$(cat $MENULIST_CONFIG | grep GRUB_DEFAULT)/GRUB_DEFAULT=$(cat $MENULIST | grep -w "set default=" | awk -F"\"" 'NR==1{print $2}')/g" $MENULIST_CONFIG fi if [ -n "`cat /etc/issue | grep "Canaima"`" ] then if ! burg-mkconfig -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi elif [ -n "`echo "${KERN_RELEASE}" | grep "3.16.6-2\|4.2.3-300\|4.0.4-301"`" ] then if ! grub2-mkconfig -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi else if ! grub-mkconfig -o "$MENULIST" 2>/dev/null then abort "update $MENULIST failed." fi fi elif [ "yes" == "$UEFI_FLAG" ] then info_boot_entry_uefi "$MENULIST" "" if [ -n "${kernel_line}" ] then replace_word "$MENULIST" "${kernel_line}" '^\(\s*append\s\+\S\+\s*\).*' "\1${cmdline}" /sbin/elilo else abort "update kernel parameters failed." fi else info_boot_entry "$MENULIST" "" if [ -n "${kernel_line}" ] then replace_word "$MENULIST" "${kernel_line}" '^\(\s*kernel\s\+\S\+\s*\).*' "\1${cmdline}" elif [ "$(which_binary grubby)" != 'none_binary' ] then for kernel in /boot/vmlinu*-${KERN_RELEASE} do grubby --add-kernel="${kernel}" --args="${cmdline}" --initrd="${INITRD_FILE}" --title="Linux (${KERN_RELEASE})" \ --grub --make-default done else abort "update kernel parameters failed." fi fi return $ERR_OK } change_blkdev() { ###LVM2 supports ide, sd ... if [ ! -f "$LVM_CONF" ] then return 0 fi ### get HV disk driver type check_disk_driver case $emu_bus in scsi) lvm_blkdev_type='sd' lvm_blkdev_name='sd' ;; ata) lvm_blkdev_type='ide' lvm_blkdev_name='hd' ;; esac ### clean lvm configuration comment_off_line "$LVM_CONF" ; remove_block "$LVM_CONF" ### get line of devices information devices_line=$(sed -n '/^\s*devices\s*{/=' "$LVM_CONF") if [ -z "$devices_line" ] then devices_line=$(sed -n '/^\s*devices\s*/=' "$LVM_CONF") if [ -z "$devices_line" ] then abort "can not find devices information in $LVM_CONF." fi total_line=$(sed -n '$=' "$LVM_CONF") while [ -z "$(sed -n "${devices_line}s/^\s*\({\)/\1/p" $LVM_CONF)" -a ${devices_line} -le ${total_line} ] do devices_line=$((devices_line+1)) done if [ ${devices_line} -gt ${total_line} ] then abort "incorrect syntax in $LVM_CONF." 
fi fi ### force relocate vg on /dev/[hs]dx [ -n "$(sed -n '/^\s*types\s*=/p' $LVM_CONF)" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*types\s*=.*' [ -n "$(sed -n '/^\s*filter\s*=/p' $LVM_CONF)" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*filter\s*=.*' insert_block "$LVM_CONF" "${devices_line}" "\ types = [ \"${lvm_blkdev_type}\" , 16 ] filter = [ \"a|/dev/${lvm_blkdev_name}.*|\", \"r|/dev/xvd.*|\" ]" #syntax highlight" if ! $(which_binary 'vgscan') >/dev/null 2>&1 then abort "reset volume groups failed." fi comment_off_line "$LVM_CONF" ; remove_block "$LVM_CONF" ### locate vg on /dev/xvdx [ -n "$(sed -n '/^\s*types\s*=/p' $LVM_CONF)" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*types\s*=.*' [ -n "$(sed -n '/^\s*filter\s*=/p' $LVM_CONF)" ] && comment_on_line "$LVM_CONF" '1,$' '^\s*filter\s*=.*' insert_block "$LVM_CONF" "${devices_line}" "\ types = [ \"xvd\" , 16 ] filter = [ \"r|/dev/.*/by-path/.*|\", \"r|/dev/.*/by-id/.*|\", \"a/.*/\" ]" #syntax highlight" } restore_blkdev() { #LVM2 supports ide, sd ... if [ ! -f "$LVM_CONF" ] then return 0 fi ### get HV disk driver type check_disk_driver case $emu_bus in scsi) lvm_blkdev_type='sd' lvm_blkdev_name='sd' ;; ata) lvm_blkdev_type='ide' lvm_blkdev_name='hd' ;; esac ### clean lvm configuration comment_off_line "$LVM_CONF" ; remove_block "$LVM_CONF" return 0 ### get line of devices information devices_line=$(sed -n '/^\s*devices\s*{/=' "$LVM_CONF") if [ -z "$devices_line" ] then devices_line=$(sed -n '/^\s*devices\s*/=' "$LVM_CONF") if [ -z "$devices_line" ] then abort "can not find devices information in $LVM_CONF." fi total_line=$(sed -n '$=' "$LVM_CONF") while [ -z "$(sed -n "${devices_line}s/^\s*\({\)/\1/p" $LVM_CONF)" -a ${devices_line} -le ${total_line} ] do devices_line=$((devices_line+1)) done if [ ${devices_line} -gt ${total_line} ] then abort "incorrect syntax in $LVM_CONF." fi fi ### locate vg on /dev/[hs]dx if [ -z "$(sed -n '/^\s*types\s*=/p' $LVM_CONF)" ] then insert_block "$LVM_CONF" "${devices_line}" " types = [ \"${lvm_blkdev_type}\" , 16 ]" fi } clean_blkdev_cache() { ## check blkid.tab local blkid_cache='' if [ -f "/etc/blkid.tab" ] then blkid_cache="/etc/blkid.tab" elif [ -f "/etc/blkid/blkid.tab" ] then blkid_cache="/etc/blkid/blkid.tab" else blkid_cache=$(blkid -h | sed -n '/^\s*-c\s/s/.*default:\s*\([./0-9a-zA-Z]*\).*/\1/p') fi ## check lvm cache local lvm_cache='/etc/lvm/cache/.cache /etc/lvm/.cache' ## cleanup all cache for cache in $blkid_cache $lvm_cache do rm -f "$cache" >/dev/null 2>&1 done } ChangeConfig() { echo " change system configurations." ## if [ -f "$MODPROBE_UNSUPPORTED_MODULES" ] then comment_on_line "$MODPROBE_UNSUPPORTED_MODULES" '1,$' '^\s*allow_unsupported_modules\s\+[0-9]\+\s*' insert_block "$MODPROBE_UNSUPPORTED_MODULES" '$' 'allow_unsupported_modules 1' fi ## modify udev network rules for suse11, etc. if [ -f "$NET_GENERATOR_RULES" ] then comment_on_line "$NET_GENERATOR_RULES" '1,$' '^SUBSYSTEMS=="xen",\s*GOTO=.*' fi ### xen-procfs means pvops kernel if [ -d "$UVP_MODULES_PATH" -a -n "$(find "$UVP_MODULES_PATH" -type f -name 'xen-procfs.ko')" ] || [ "$kernel_param_flag" = "1" ] || [ "$NO_PROCFS" = "true" ] then return 0 fi ### change hv to pv if ! add_kernel_param then abort "add_kernel_param failed" fi if ! change_blkdev then abort "change_blkdev failed" fi if ! change_mkinitrd then abort "change_mkinitrd failed" fi } RestoreConfig() { echo " restore system configurations." 
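# RestoreConfig mirrors ChangeConfig above: it removes the temporary
# "allow_unsupported_modules 1" entry and uncomments the original setting, re-enables the
# xen rule in the persistent-net-generator udev file, and - unless this is a pvops kernel
# (xen-procfs.ko present), kernel_param_flag=1 or NO_PROCFS=true - also runs
# remove_kernel_param, restore_blkdev and restore_mkinitrd to undo the grub, LVM and
# initrd changes made at install time.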
## if [ -f "$MODPROBE_UNSUPPORTED_MODULES" ] then comment_off_line "$MODPROBE_UNSUPPORTED_MODULES" remove_block "$MODPROBE_UNSUPPORTED_MODULES" fi ## modify udev network rules for suse11, etc. if [ -f "$NET_GENERATOR_RULES" ] then comment_off_line "$NET_GENERATOR_RULES" fi ### xen-procfs means pvops kernel if [ -d "$UVP_MODULES_PATH" -a -n "$(find "$UVP_MODULES_PATH" -type f -name 'xen-procfs.ko')" ] || [ "$kernel_param_flag" = "1" ] || [ "$NO_PROCFS" = "true" ] then return 0 fi ### change hv to pv if ! remove_kernel_param then abort "remove_kernel_param failed" fi if ! restore_blkdev then abort "restore_blkdev failed" fi ### if ! restore_mkinitrd then abort "restore_mkinitrd failed" fi } Install() { cd "$INSTALLER_DIR" $Info "Execute Installation" echo "Start Installation :" mkdir -p $INSTALL_PATH mkdir -p $WORKDIR ## install version.ini and uninstall rm -f "$INSTALL_PATH/install" if ! (cp -f "$0" "$INSTALL_PATH/uninstall" && ln -s "$INSTALL_PATH/uninstall" "$INSTALL_PATH/install" && chmod -R 755 "$INSTALL_PATH/uninstall") then abort "install $INSTALL_PATH/uninstall failed" fi ## install GuestOSFeature Files InstallFeatureFile ## install drivers InstallModules ### ubuntu14.04.3 and debian8.2 and Fedora23 no procfs if [ -d "$UVP_MODULES_PATH" -a -n "$(find "$UVP_MODULES_PATH" -type f -name 'xen-procfs.ko')" ] || [ "$NO_PROCFS" = "true" ] then EnableArpNotifySetting fi install -m755 ${LCL_UVP_ARPING} ${UVP_ARPING} if ! (cp -f "${LCL_XENVNET_ARP}" "${XENVNET_ARP}" && chmod 755 "${XENVNET_ARP}") then $Info "install ${XENVNET_ARP} failed" abort "install ${XENVNET_ARP} failed" fi install -m755 ${LCL_UVP_NDSEND} ${UVP_NDSEND} if ! (cp -f "${LCL_XENVNET_ARP}" "${XENVNET_ARP}" && chmod 755 "${XENVNET_ARP}") then $Info "install ${XENVNET_ARP} failed" abort "install ${XENVNET_ARP} failed" fi ## hide suspend/hibernate if [ -e "$SUSPEND_PATH" ] then chmod a-x "$SUSPEND_PATH" fi if [ -e "$HIBERNATE_PATH" ] then chmod a-x "$HIBERNATE_PATH" fi #install unplug shell script cp -a ${INSTALLER_DIR}/bin/pvumount.sh /usr/bin/pvumount.sh chmod 755 /usr/bin/pvumount.sh ChangeConfig return 0 } Uninstall() { cd "$INSTALLER_DIR" $Info "Execute Uninstallation" echo "Start Uninstallation :" mkdir -p $WORKDIR ## remove services and drivers ### ubuntu14.04.3 and debian8.2 and Fedora23 no procfs if [ -d "$UVP_MODULES_PATH" -a -n "$(find "$UVP_MODULES_PATH" -type f -name 'xen-procfs.ko')" ] || [ "$NO_PROCFS" = "true" ] then RestoreArpNotifySetting fi RestoreConfig UninstallFeatureFile UninstallServices UninstallModules ## delete uninstall and version.ini rm -f "$INSTALL_PATH/install" "$INSTALL_PATH/uninstall" rm -f "$INSTALL_PATH/version.ini" return 0 } ############################################################################### # main entrance ############################################################################### make_initrd() { local binary="" local options="" local image="" local kernel="" if [ -f '/etc/initramfs-tools/modules' ] then binary=$(which_binary update-initramfs) if [ "$binary" = 'none_binary' ] then $Info "can not find update-initramfs." return 1 fi options="-u" kernel="-k $KERN_RELEASE" elif [ -f '/etc/dracut.conf' ] && [ "$kernel_param_flag" = "0" ] then binary=$(which_binary dracut) if [ "$binary" = 'none_binary' ] then $Info "can not find dracut." return 1 fi options="--force" image="$INITRD_FILE.uvptmp" elif [ -f "/sbin/mkinitrd" ] then binary=$(which_binary mkinitrd) if [ "$binary" = 'none_binary' ] then $Info "can not find mkinitrd." 
return 1 fi if [ -n "$(mkinitrd --help 2>&1 | sed -n '/--with=/p')" ] then options="-f" image="$INITRD_FILE.uvptmp" if [ "el5uek" == "$(uname -r | awk -F"." '{print $6}')" ] then kernel="$KERN_RELEASE --builtin=ehci-hcd --builtin=ohci-hcd --builtin=uhci-hcd --with=xen-blkfront --with=xen-netfront" else kernel="$KERN_RELEASE" fi for disk in `ls /sys/block/| grep sd` do isscsi=`ls -l /sys/block/${disk}/device/driver | grep scsi` if test "x${isscsi}" != x then kernel="$KERN_RELEASE --allow-missing" break fi done else options='' image='' kernel="-k vmlinuz-$KERN_RELEASE -i initrd-$KERN_RELEASE" fi else return 1 fi if [ "$kernel_param_flag" = "1" ] then true elif [ "$INSTALL_ACTION" = "install" -o "$INSTALL_ACTION" = "upgrade" ] && [ -f "/lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/sym53c8xx.ko" ] then mv /lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/sym53c8xx.ko /lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/.sym53c8xx.ko.bak elif [ "$INSTALL_ACTION" == "uninstall" ] && [ -f "/lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/.sym53c8xx.ko.bak" ] then mv /lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/.sym53c8xx.ko.bak /lib/modules/`uname -r`/kernel/drivers/scsi/sym53c8xx_2/sym53c8xx.ko fi depmod -a if [ -f "/etc/sysconfig/kdump" ] # update initrd for kdump then touch /etc/sysconfig/kdump fi $binary $options $image $kernel >/dev/null 2>&1 ret=$? if [ "${ret}" = "0" -a -f "$INITRD_FILE.uvptmp" ] then mv -f $INITRD_FILE.uvptmp $INITRD_FILE fi return ${ret} } # enter critical section EnterCriticalSection() { $Info "EnterCriticalSection: INSTALL_VERSION=$INSTALL_VERSION" trap "cancel" INT TERM QUIT HUP ### lock installation if [ $CUR_INSTALL_LEVEL = 1 ] then if ! lock_install then $Info "Lock installation failed." exit 1 fi fi if [ "$kernel_param_flag" = "1" ] then MKINITRD=$(which_binary mkinitrd) INITRD_FILE="/boot/initrd-${KERN_RELEASE}" MODPROBE_UNSUPPORTED_MODULES=$(readlink -f '/etc/modprobe.d/10-unsupported-modules.conf') NET_GENERATOR_RULES=$(readlink -f '/lib/udev/rules.d/75-persistent-net-generator.rules') elif [ ! "$SYS_TYPE" = "gentoo" ] then if ! init_backup then $Info "init_backup failed." abort "init_backup failed." fi else NET_GENERATOR_RULES=$(readlink -f '/lib/udev/rules.d/75-persistent-net-generator.rules') fi if ! backup_all_files then $Info "Backup all files failed." abort "Backup all files failed." fi if [ $BAK_INSTALL_LEVEL -le 0 -o 3 -le $BAK_INSTALL_LEVEL ] then echo "FATLE ERROR: CUR_INSTALL_LEVEL=$CUR_INSTALL_LEVEL,BAK_INSTALL_LEVEL=$BAK_INSTALL_LEVEL." abort "FATLE ERROR: CUR_INSTALL_LEVEL=$CUR_INSTALL_LEVEL,BAK_INSTALL_LEVEL=$BAK_INSTALL_LEVEL." fi } # leave critical section LeaveCriticalSection() { $Info "LeaveCriticalSection: INSTALL_VERSION=$INSTALL_VERSION" local ret_val=0 local rebootflag=1 if ! clean_all_files then $Info "Clean all files failed." fi trap "" INT TERM QUIT HUP if [ $CUR_INSTALL_LEVEL = 1 ] then ## install_action=$INSTALL_ACTION if [ "$install_action" = 'upgrade_uvpmonitor' ] then unlock_install InstallServices sync ; sync ; sync ret_val=$UVPMONITOR_OK else if [ "$SYS_TYPE" = "gentoo" ] then test "$install_action" = 'upgrade' && install_action='install' if [ "$install_action" = "upgrade_hcall" ] then install_action='install' rebootflag=0 fi if [ "$INSTALL_ACTION" != "uninstall" ] then InstallServices fi sync ; sync ; sync if [ "${rebootflag}" = "1" ] then echo "The PV driver is ${install_action}ed successfully." echo "Reboot the system for the installation to take effect." 
fi rmdir $INSTALL_PATH 1>/dev/null 2>&1 unlock_install sync rm -fr $WORKDIR if [ "${rebootflag}" = "1" ] then return $ERR_OK else ret_val=$UVPMONITOR_OK fi fi test "$install_action" = 'upgrade' && install_action='install' if [ "$install_action" = "upgrade_hcall" ] then install_action='install' rebootflag=0 fi echo " Update kernel initrd image." sync ; sync ; sync # make sure that all files saved ## if [ -n "`uname -r | grep 2.6.18-SKL1.9.4.ky3.173.4.1`" ]; then remove_block_line_by_line "/etc/modprobe.conf" "ata_piix" fi if [ ! -f "$INITRD_FILE.uvpbak" ] then BACKUP_INITRD="cp -f $INITRD_FILE $INITRD_FILE.uvpbak" else BACKUP_INITRD=':' fi if ($BACKUP_INITRD && make_initrd) then $Info "make_initrd success" ret_val=$ERR_OK if [ "$INSTALL_ACTION" != "uninstall" ] then InstallServices elif [ "$INSTALL_ACTION" = "uninstall" ] then remove_block "$MKINITRD_CONFIG" fi if [ "${rebootflag}" = "1" ] then echo "The PV driver is ${install_action}ed successfully." echo "Reboot the system for the installation to take effect." fi else warn "make initrd failed, please do not reboot and remake it by yourself." echo "The PV driver ${install_action}ation failed." $Info "make_initrd failed" ret_val=$ERR_INITRD fi rmdir $INSTALL_PATH 1>/dev/null 2>&1 if [ "$kernel_param_flag" = "0" ] then clean_blkdev_cache fi unlock_install sync fi if [ "${rebootflag}" = "0" ] then ret_val=$UVPMONITOR_OK fi else ret_val=$ERR_OK fi rm -fr $WORKDIR return $ret_val } Confirm() { echo -n "$1" echo -n "OK [Y/n]?" read ANSWER if [ "$ANSWER" == "y" ] || [ "$ANSWER" == "Y" ] || [ -z "$ANSWER" ] then eval $1 echo "... parameter set" else echo "... parameter NOT set" fi } CheckVersion() { if [ ! -f "${INSTALL_PATH}/version.ini" ] then return 1 fi ### check pvdriver version pvdriverVersion=$(. "${INSTALL_PATH}/version.ini" ; echo "$pvdriverVersion") kernelVersion=$(. "${INSTALL_PATH}/version.ini" ; echo "$kernelVersion") [ -z "$pvdriverVersion" ] && pvdriverVersion='0' [ -z "$kernelVersion" ] && kernelVersion="$KERN_RELEASE" #compatible with legacy pvdriver if [ "$FORCE_INSTALL" != 'true' -a $(compare_version "$INSTALL_VERSION" "$pvdriverVersion" ; echo $?) = 1 ] then abort "can not upgrade pvdriver from ${pvdriverVersion} to ${INSTALL_VERSION}." fi if [ "$kernelVersion" != "$KERN_RELEASE" -a "$FORCE_INSTALL" != 'true' ] then abort "Please boot kernel $kernelVersion and $INSTALL_ACTION again." fi return 0 } showhelp() { echo "\ Usage: $0 [-f] Options: -f force to downgrade pvdriver. -o offline to copy driver. " } ##added on 2013-6-20 ##single upgrade uvpmonitor function upgrade_uvpmonitor() { echo "upgrading uvpmonitor services.." mkdir -p ${WORKDIR} if ! ( rm -f ${INSTALL_PATH}/version.ini ) then abort "remove version.ini failed" fi if ! (cp -f "${LCL_UVP_MONITOR}" "${UVP_MONITOR}" && chmod 755 "${UVP_MONITOR}") then $Info "install ${UVP_MONITOR} failed" abort "install ${UVP_MONITOR} failed" fi if ! cat > "$INSTALL_PATH/version.ini" << EOF pvdriverVersion=${INSTALL_VERSION} DriverVersion=${DRIVER_VERSION} kernelVersion="${KERN_RELEASE}" EOF then abort "install $INSTALL_PATH/version.ini failed" fi if ! 
(cp -f "$0" "$INSTALL_PATH/uninstall" && ln -s "$INSTALL_PATH/uninstall" "$INSTALL_PATH/install" && chmod -R 755 "$INSTALL_PATH/uninstall") then abort "update $INSTALL_PATH/uninstall failed" fi return 0 } ###added end ### parse options INSTALL_ACTION='none' OTHER_FUNC='none' FORCE_INSTALL='false' AUTO_UPGRADE='false' while getopts "iusrdfho" option do case $option in i) # compatible with "install -i" if [ "$INSTALLER" != "install" ] then echo "$INSTALLER do not support -i option." exit 1 fi AUTO_UPGRADE='true' FORCE_INSTALL='true' INSTALLER='install' UNINSTALLER='install' ;; u) # compatible with "install -u" if [ "$INSTALLER" != "install" ] then echo "$INSTALLER do not support -u option." exit 1 fi AUTO_UPGRADE='true' FORCE_INSTALL='true' INSTALLER='uninstall' UNINSTALLER='install' ;; f) FORCE_INSTALL='true' ;; s) OTHER_FUNC="service" if [ "$INSTALLER" = "install" ] then AUTO_UPGRADE='true' fi INSTALLER='uninstall' ;; r) OTHER_FUNC="restore" if [ "$INSTALLER" = "install" ] then AUTO_UPGRADE='true' fi INSTALLER='uninstall' ;; d) OTHER_FUNC="dep_tools" ;; o) OTHER_FUNC="offline" ./bin/offline/offline_copy_uvp_module exit $? ;; *) showhelp exit 1 ;; esac done if [ "$INSTALLER" = "install" -a "$INSTALLER_DIR" = "$INSTALL_PATH" ] || [ "$INSTALLER" = "uninstall" -a "$INSTALLER_DIR" != "$INSTALL_PATH" ] then $Info "you can not run ${INSTALLER}ation in $INSTALLER_DIR" echo "you can not run ${INSTALLER}ation in $INSTALLER_DIR" exit 1 fi ### check system InitSystemInfo InitPackageInfo check_block "/etc/sysconfig/kernel" upgrade_uvpmonitor_flag="0" if [ "${INSTALLER}" = "install" ] then LCL_UVP_XENPCI_PATH=$LCL_UVP_MODULES_PATH/xen-platform-pci/xen-platform-pci.ko LCL_UVP_XENVIF_PATH=$LCL_UVP_MODULES_PATH/xen-vnif/xen-netfront.ko LCL_UVP_XENHCALL_PATH=$LCL_UVP_MODULES_PATH/xen-hcall/xen-hcall.ko LCL_UVP_XENPROCFS_PATH=$LCL_UVP_MODULES_PATH/xen-procfs/xen-procfs.ko if [ -f ${LCL_UVP_XENPCI_PATH} ] then XenpciVer="$(echo $(modinfo ${LCL_UVP_XENPCI_PATH} | grep -w version | awk -F":" '{print $2}'))" DRIVER_VERSION="${XenpciVer}" elif [ -f ${LCL_UVP_XENVIF_PATH} ] then XenVifVer="$(echo $(modinfo ${LCL_UVP_XENVIF_PATH} | grep -w version | awk -F":" '{print $2}'))" DRIVER_VERSION="${XenVifVer}" elif [ -f ${LCL_UVP_XENPROCFS_PATH} ] then XenProcfsVer="$(echo $(modinfo ${LCL_UVP_XENPROCFS_PATH} | grep -w version | awk -F":" '{print $2}'))" DRIVER_VERSION="${XenProcfsVer}" fi if [ -f ${LCL_UVP_XENHCALL_PATH} ] then XenhcallVer="$(echo $(modinfo ${LCL_UVP_XENHCALL_PATH} | grep -w version | awk -F":" '{print $2}'))" USERDRI_VERSION="$XenhcallVer" if [ "X${DRIVER_VERSION}" = "X" ] then DRIVER_VERSION="2.2.0.202" fi fi upgrade_hcall_flag="0" if [ -f "${INSTALL_PATH}/version.ini" ] then DRIVERINFO_TMP="`cat ${INSTALL_PATH}/version.ini | grep -w "DriverVersion"`" if [ -n "${DRIVERINFO_TMP}" ] then DRIVERINFO_TMP=$(. "${INSTALL_PATH}/version.ini" ; echo ${DriverVersion} ) OldPvdriverVersion_TMP=$(. "${INSTALL_PATH}/version.ini" ; echo ${pvdriverVersion} ) CurRunVersion_TMP=`cat $UPGRADE_VERSION 2>/dev/null` LastRunversion_TMP=`cat $LAST_VERSION 2>/dev/null` UserDriINFO_TMP="`cat ${INSTALL_PATH}/version.ini | grep -w "UserDriVer"`" if [ -n "${UserDriINFO_TMP}" ] then UserDriINFO_TMP=$(. "${INSTALL_PATH}/version.ini" ; echo ${UserDriVer} ) fi if [ "${DRIVERINFO_TMP}" = "${DRIVER_VERSION}" ] then if [ "X" = "X${UserDriINFO_TMP}" ] || [ $(compare_version "$USERDRI_VERSION" "$UserDriINFO_TMP" ; echo $?) 
!= 0 ] then upgrade_hcall_flag="1" else upgrade_uvpmonitor_flag="1" fi fi fi fi fi ### check /boot and /var/tmp size if [ "$upgrade_uvpmonitor_flag" = "0" ] then if [ -f "/boot/initramfs-${KERN_RELEASE}.img" ]; then initrd_size=$(du -sk "/boot/initramfs-${KERN_RELEASE}.img" | awk -F " " '{print $1}') BOOT_MIN_SIZE=$((initrd_size+BOOT_FREE_SPACE)) elif [ -f "/boot/initrd.img-${KERN_RELEASE}" ]; then initrd_size=$(du -sk "/boot/initrd.img-${KERN_RELEASE}" | awk -F " " '{print $1}') BOOT_MIN_SIZE=$((initrd_size+BOOT_FREE_SPACE)) elif [ -f "/boot/initrd-${KERN_RELEASE}" ]; then initrd_size=$(du -sk "/boot/initrd-${KERN_RELEASE}" | awk -F " " '{print $1}') BOOT_MIN_SIZE=$((initrd_size+BOOT_FREE_SPACE)) elif [ -f "/boot/initrd-${KERN_RELEASE}.img" ]; then initrd_size=$(du -sk "/boot/initrd-${KERN_RELEASE}.img" | awk -F " " '{print $1}') BOOT_MIN_SIZE=$((initrd_size+BOOT_FREE_SPACE)) elif [ -f "/etc/gentoo-release" ]; then BOOT_MIN_SIZE=$((0+BOOT_FREE_SPACE)) else $Info "Can not find initrd file in /boot, Please check." echo "Can not find initrd file in /boot, Please check." exit 1 fi uvp_bak_file=$(ls /boot/ | grep "uvpbak$") if [ ! -z "${uvp_bak_file}" ]; then BOOT_MIN_SIZE=${BOOT_FREE_SPACE} fi boot_size=$(df -k /boot | grep "[0-9]%" | sed "s/%.*//" | awk '{print $(NF-1)}') if [ -f "/etc/gentoo-release" ]; then $Info "Gentoo has no initrd." elif [ "${boot_size}" -lt "${BOOT_MIN_SIZE}" ]; then $Info "The /boot disk space is not enough to install/uninstall." echo "The /boot disk space is not enough to install/uninstall." exit 1 fi tmp_size=$(df -k /var/tmp | grep "[0-9]%" | sed "s/%.*//" | awk '{print $(NF-1)}') tmp_min_size=$(($initrd_size*$INITRD_RATIO)) if [ -f "/etc/gentoo-release" ]; then $Info "Gentoo has no initrd." elif [ "${tmp_size}" -lt "${tmp_min_size}" ]; then $Info "The /var/tmp disk space is not enough to change initrd or initramfs." echo "The /var/tmp disk space is not enough to change initrd or initramfs." exit 1 fi fi if [ -e '/etc/SuSE-release' -a -n "`cat /etc/SuSE-release 2>/dev/null | grep 'VERSION' | grep '12'`" ] then kernel_param_flag="1" fi $Info "Run $INSTALLER: SYS_TYPE=$SYS_TYPE, KERN_RELEASE=$KERN_RELEASE, CPU_ARCH=$CPU_ARCH INSTALL_VERSION=$INSTALL_VERSION" ### run tools if [ "$OTHER_FUNC" = 'dep_tools' ] then check_tools echo "[ PV DRIVER DEPENDING COMMANDS ]" echo "$DEP_CMDS_LIST" echo "" echo "[ DEPENDING PACKAGE: DEPENDING COMMAND ]" echo "$DEP_PKGS_LIST" echo "" exit 0 elif [ "$OTHER_FUNC" = 'restore' ] then trap "" INT TERM QUIT HUP $Info "restore backups." restore_all_files exit $? elif [ "$OTHER_FUNC" = 'service' ] then trap "" INT TERM QUIT HUP $Info "restart services." remove_rc_config ${UVP_MONITOR_INIT} && add_rc_config ${UVP_MONITOR_INIT} if [ "$AUTO_UPGRADE" != 'true' ] then stop_init_script ${UVP_MONITOR_INIT} && start_init_script ${UVP_MONITOR_INIT} fi exit $? fi #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@# EnterCriticalSection #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@# cd "$INSTALLER_DIR" if [ "$INSTALLER" = "install" ] then # INSTALL_ACTION="install" ##modfied on 2013-6-20 if [ -f "${INSTALL_PATH}/version.ini" ] then DRIVERINFO="`cat ${INSTALL_PATH}/version.ini | grep "DriverVersion"`" if [ -z ${DRIVERINFO} ] then INSTALL_ACTION="upgrade" elif [ -n ${DRIVERINFO} ] then ### old version information DRIVERINFO=$(. "${INSTALL_PATH}/version.ini" ; echo ${DriverVersion} ) OldPvdriverVersion=$(. 
"${INSTALL_PATH}/version.ini" ; echo ${pvdriverVersion} ) CurRunVersion=`cat $UPGRADE_VERSION` LastRunversion=`cat $LAST_VERSION` if [ "${DRIVERINFO_TMP}" = "${DRIVER_VERSION}" ] then USERDRIINFO="`cat ${INSTALL_PATH}/version.ini | grep "UserDriVer"`" if [ -z "${USERDRIINFO}" ] then INSTALL_ACTION="upgrade_hcall" elif [ -n ${USERDRIINFO} ] then USERDRIINFO=$(. "${INSTALL_PATH}/version.ini" ; echo ${UserDriVer} ) if [ "X" = "X${USERDRIINFO}" ] || [ $(compare_version "$USERDRI_VERSION" "$USERDRIINFO" ; echo $?) != 0 ] then INSTALL_ACTION="upgrade_hcall" else INSTALL_ACTION="upgrade_uvpmonitor" fi fi else INSTALL_ACTION="upgrade" fi fi fi ##modfied end if CheckVersion then # replace uninstall with install, for cpmpatibility with legacy pvdriver if [ ! -f "$UNINSTALLER_DIR/$UNINSTALLER" ] then if [ -f "$UNINSTALLER_DIR/install" ] then UNINSTALLER="install" else abort "you have a legacy pvdriver installed, please uninstall it manually." fi fi fi elif [ "$INSTALLER" = "uninstall" ] then INSTALL_ACTION="uninstall" if ! CheckVersion then abort "unknown error for uninstallation." fi else abort "invalid installer $INSTALLER" fi ### uninstall pvdriver if [ "$INSTALL_ACTION" = "uninstall" ] then if ! Uninstall then abort "Uninstall failed." fi if [ "$(which_binary kudzu)" != 'none_binary' ]; then chkconfig kudzu on 1>/dev/null 2>&1 $Info "chkconfig kudzu on" fi elif [ "$INSTALL_ACTION" = "upgrade" ] || [ "$INSTALL_ACTION" = "upgrade_uvpmonitor" ] || [ "$INSTALL_ACTION" = "upgrade_hcall" ] then UVP_MONITOR_INI='/var/run/uvp-monitor.ini' if [ ! -f "$UVP_MONITOR_INI" ] then if [ -z ${DRIVERINFO} ] then echo -n $(. ${INSTALL_PATH}/version.ini ; echo ${pvdriverVersion}) | sed 's/\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/1.\1/g' > "$UVP_MONITOR_INI" elif [ -n ${DRIVERINFO} ] then driverver=$(echo -n $DRIVERINFO | awk -F"." '{ print $1 }') if [ "1" = "$driverver" ] then echo -n $(. ${INSTALL_PATH}/version.ini ; echo ${pvdriverVersion}) | sed 's/\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/1.\1/g' > "$UVP_MONITOR_INI" else echo -n $(. ${INSTALL_PATH}/version.ini ; echo ${pvdriverVersion}) | sed 's/\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/2.\1/g' > "$UVP_MONITOR_INI" fi fi fi if [ "$UNINSTALLER" = 'install' ] then UNINSTALLER_PARAM='-u' export upgrade_state="reinstall" else UNINSTALLER_PARAM='' fi if ! (cd "$UNINSTALLER_DIR" && ./$UNINSTALLER "$UNINSTALLER_PARAM" && cd - >/dev/null) then abort "Uninstall failed." fi [ "$AUTO_UPGRADE" != 'true' ] && kill_proc ${UVP_MONITOR} fi if [ -e '/etc/SuSE-release' -o -n "$(grep -i 'suse\|s\.u\.s\.e' /etc/issue)" ] && [ -n "`uname -r | grep 2.6.16.46`" -o -n "`uname -r | grep 2.6.16.60`" ] then $Info "remove_block_line_by_line" remove_block_line_by_line "/etc/modprobe.d/blacklist" \ 'blacklist *\s*ide-cd:blacklist *\s*ide-core:blacklist *\s*ide-disk:blacklist *\s*ide-floppy:blacklist *\s*ide-tape:blacklist *\s*ide-scsi:blacklist *\s*piix:blacklist *\s*osst:blacklist *\s*scsi_mod:blacklist *\s*sd_mod:blacklist *\s*sg:blacklist *\s*sr_mod:blacklist *\s*st' fi ### install pvdriver if [ "$INSTALL_ACTION" = "install" -o "$INSTALL_ACTION" = "upgrade" ] || [ "$INSTALL_ACTION" = "upgrade_uvpmonitor" ] || [ "$INSTALL_ACTION" = "upgrade_hcall" ] then # install pvdriver if ! Install then abort "Install failed." fi if [ "$(which_binary kudzu)" != 'none_binary' ]; then chkconfig kudzu off 1>/dev/null 2>&1 $Info "chkconfig kudzu off" fi fi #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@# LeaveCriticalSection ret_val=$? 
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@# if [ "$CUR_INSTALL_LEVEL" = '1' ] then if [ "$INSTALL_ACTION" = "uninstall" ] then rm -f "${UPGRADE_VERSION}" rmdir "$(dirname $UPGRADE_VERSION)" 1>/dev/null 2>&1 else mkdir -p "$(dirname $UPGRADE_VERSION)" echo "2.$(echo ${INSTALL_VERSION} | sed 's/\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/\1/g')" > "${UPGRADE_VERSION}" if [ ! -d "/tmp/uvptools_temp" ] && ([ "$INSTALL_ACTION" == "upgrade_uvpmonitor" ] || [ "$INSTALL_ACTION" = "upgrade_hcall" ]) then OLD_UPGRADE_VERSION="/var/run/tools_upgrade/pvdriver_version.ini" cat ${UPGRADE_VERSION} > ${OLD_UPGRADE_VERSION} cat ${UPGRADE_VERSION} > ${UVP_MONITOR_INI} ${UVP_MONITOR_INIT} restart 1>/dev/null 2>&1 echo "UVP TOOLS is upgraded successfully." elif [ -d "/tmp/uvptools_temp" ] && ([ "$INSTALL_ACTION" == "upgrade_uvpmonitor" ] || [ "$INSTALL_ACTION" = "upgrade_hcall" ]) then OLD_UPGRADE_VERSION="/var/run/tools_upgrade/pvdriver_version.ini" cat ${UPGRADE_VERSION} > ${OLD_UPGRADE_VERSION} cat ${UPGRADE_VERSION} > ${UVP_MONITOR_INI} fi fi fi exit $ret_val UVP-Tools-2.2.0.316/upg.sh000066400000000000000000000061641314037446600147410ustar00rootroot00000000000000#!/bin/bash ##################################################### # Date: 2011-6-7 # Description: cp upgrade file # support redhat5.3_32,suse11 # History: ################################################### UPGRADE_LOG=/etc/.tools_upgrade/pv_upgrade.log resultFile=/tmp/uvptools_temp/tmp_result DEST_DIR=/tmp/uvptools_temp oldVfile=/var/run/tools_upgrade/pvdriver_version.ini newVfile=/etc/.tools_upgrade/pvdriver_version.ini dir=`dirname $0` UPGRADE_FILE=install v_min_size=40960 ALIAS_CP=0 ALIAS_MV=0 ALIAS_RM=0 chattr -i ${DEST_DIR} 2>/dev/null find ${DEST_DIR}/pvdriver -type f | xargs chattr -i 2>/dev/null # remove -i function modify_alias() { alias | grep "cp -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias cp='cp' ALIAS_CP=1 fi alias | grep "mv -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias mv='mv' ALIAS_MV=1 fi alias | grep "rm -i" >/dev/null 2>&1 if [ $? -eq 0 ];then alias rm='rm' ALIAS_RM=1 fi } # restore alias function restore_alias() { if [ $ALIAS_CP -eq 1 ];then alias cp='cp -i' fi if [ $ALIAS_MV -eq 1 ];then alias mv='mv -i' fi if [ $ALIAS_RM -eq 1 ];then alias rm='rm -i' fi } function main() { modify_alias echo "$(date +%x-%X) start run upg.sh" >> ${UPGRADE_LOG} v_avail=`df / | grep "[0-9]%" | sed "s/%.*//" | awk '{print $(NF-1)}'` if [ "${v_avail}" -lt "${v_min_size}" ];then echo "$(date +%x-%X) The disk space is not enough to install." >> ${UPGRADE_LOG} echo failed:pvdriver:no-space >> ${resultFile} exit 1 fi if [ ! -f ${dir}/$UPGRADE_FILE ]; then echo "$(date +%x-%X) install files is not exist" >> $UPGRADE_LOG echo failed:pvdriver:no-install >> ${resultFile} restore_alias exit 1 fi dos2unix ${dir}/$UPGRADE_FILE 1>/dev/null 2>&1 chmod +x ${dir}/$UPGRADE_FILE 1>/dev/null 2>&1 cd ${dir} && ./${UPGRADE_FILE} -i 1>/dev/null 2>&1 exit_value=$? 
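    # exit_value is the exit status of the bundled install script run above; any value other
    # than 0 or 3 is treated as a failed driver installation, logged to ${UPGRADE_LOG} and
    # reported as "failed:pvdriver:error-install" in ${resultFile}.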
## PD Driver upgrade successfully with return value 0 , monitor single upgrade successfully with return value 3 if [ $exit_value -ne 0 -a $exit_value -ne 3 ];then echo "$(date +%x-%X) $UPGRADE_FILE -i error" >> $UPGRADE_LOG restore_alias echo failed:pvdriver:error-install >> ${resultFile} exit 1 fi echo "$(date +%x-%X) ${UPGRADE_FILE} -i successful" >> $UPGRADE_LOG if diff ${oldVfile} ${newVfile} 1>/dev/null 2>&1 then restore_alias echo success:pvdriver:uninstall >> ${resultFile} echo "$(date +%x-%X) success:pvdriver:uninstall" >> $UPGRADE_LOG exit 5 else if [ $exit_value -eq 0 ]; then echo success:pvdriver:ready-upgrade >> ${resultFile} echo "$(date +%x-%X) success:pvdriver:ready-upgrade" >> $UPGRADE_LOG fi if [ $exit_value -eq 3 ]; then echo "$(date +%x-%X) success:pvdriver:monitor-upgrade-ok" >> $UPGRADE_LOG cat ${newVfile} > ${oldVfile} exit 3 fi fi restore_alias } main UVP-Tools-2.2.0.316/uvp-monitor/000077500000000000000000000000001314037446600161025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/Makefile000066400000000000000000000062561314037446600175530ustar00rootroot00000000000000############################################################################## # Copyright 2016, Huawei Tech. Co., Ltd. # FileName : Makefile # Desciption: The main makefile to build monitor module for Linux Distr. ############################################################################## TARGET := uvp-monitor CC := gcc cpu_bit = $(shell getconf LONG_BIT) ifeq ($(cpu_bit), 32) cpu_bit=32 else cpu_bit=64 endif # for debug version, set DBG_FLAAG=y DBG_FLAG:= INC_FLAGS += -Iinclude -Isecurec/include CFLAGS += -lpthread ifeq ($(DBG_FLAG), y) CFLAGS += -g endif ${TARGET}-${cpu_bit}: securec_api has_xs patch_xs $(CC) -o $@ ${INC_FLAGS} main.c xenctlmon.c network.c netinfo.c memory.c cpuinfo.c xenstore_common.c hostname.c cpu_hotplug.c disk.c ${CFLAGS} libsecurec.a -L. -lxenstore $(CC) -o $@-static ${INC_FLAGS} main.c xenctlmon.c network.c netinfo.c memory.c cpuinfo.c xenstore_common.c hostname.c cpu_hotplug.c disk.c ${CFLAGS} libsecurec.a -L. libxenstore.a -L. $(CC) -o arping iputils/arping.c $(CC) -o ndsend iputils/ndsend.c has_xs: @if [ -d "xen-4.1.2/tools/xenstore" ]; then \ echo "You have a xenstore directory."; \ exit 0; \ elif [ -f "xen-4.1.2.tar.gz" ]; then \ echo "You have a xen-4.1.2 source tarball."; \ else \ wget https://downloads.xenproject.org/release/xen/4.1.2/xen-4.1.2.tar.gz; \ if [ "X1" = "X$$?" ]; then \ echo "You must download xen-4.1.2 source tarball." 
\ exit 1; \ fi; \ fi; \ tar -xzf xen-4.1.2.tar.gz xen-4.1.2/tools/xenstore; \ exit 0 patch_xs: @if [ -d "xen-4.1.2/tools/xenstore" ]; then \ patch -N -p0 < xenstore_dev_xen_xenbus_enable.diff; \ cd xen-4.1.2/tools/xenstore/; \ make all; \ install -m755 libxenstore.so.3.0.0 /usr/lib; \ ln -sf /usr/lib/libxenstore.so.3.0.0 /usr/lib/libxenstore.so.3.0; \ ln -sf /usr/lib/libxenstore.so.3.0 /usr/lib/libxenstore.so; \ cd -; \ fi securec_api: @cd ./securec/src/; \ make -f MakefileCustom securecstatic; \ cp libsecurec.a ../../libsecurec.a; \ cd - install: install -m544 uvp-monitor-${cpu_bit} /usr/bin/uvp-monitor install -m544 ./xen-4.1.2/tools/xenstore/libxenstore.so.3.0.0 /usr/lib @cd /usr/lib; \ ln -sf libxenstore.so.3.0.0 libxenstore.so.3.0; \ ln -sf libxenstore.so.3.0.0 libxenstore.so; \ cd - install -m544 monitor/uvp-monitor /etc/init.d/ @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc3.d/S99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc3.d/K99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc5.d/S99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc5.d/K99uvp-monitor install_static: install -m544 uvp-monitor-${cpu_bit}-static /usr/bin/uvp-monitor install -m544 monitor/uvp-monitor /etc/init.d/ @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc3.d/S99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc3.d/K99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc5.d/S99uvp-monitor @ln -sf /etc/init.d/uvp-monitor /etc/rc.d/rc5.d/K99uvp-monitor .PHONY: clean clean: @cd ./securec/src; \ make clean; \ cd - @if [ -d "./xen-4.1.2/tools/xenstore" ]; then \ cd ./xen-4.1.2/tools/xenstore; \ make clean; \ cd -; \ fi rm -f libsecurec.a rm -f libxenstore.a rm -f $(TARGET)-* rm -f *.o arping ndsend UVP-Tools-2.2.0.316/uvp-monitor/cpu_hotplug.c000066400000000000000000000266501314037446600206100ustar00rootroot00000000000000/* * Enhances the cpu hot-plug * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "include/xenstore_common.h" #include "include/public_common.h" /* Linux */ #include #include #include #include #include #include "securec.h" /* ִнű ʱʱ */ #define POPEN_TIMEOUT 30 /* ʱ */ #define ERROR_TIMEOUT -2 /* pipeʧ */ #define ERROR_PIPE -6 /* forkʧ */ #define ERROR_FORK -7 /* fdopenʧ */ #define ERROR_FDOPEN -8 /* selectʧ */ #define ERROR_SELECT -9 /* waitpidʧ */ #define ERROR_WAITPID -10 /* waitpidʧ */ #define ERROR_PARAMETER -11 /*CPU Ȳֵ֧cpu */ #define CPU_NR_MAX 64 /******************************************************************************* Function : uvpPopen Description : ͨϵͳִshellűؽ @param : const char* pszCmd @param : char* pszBuffer @param : int size С Output : Return : 0-127 űֵ <0 űִʧ Calls : Called by : History : 1 Date : 2011-7-14 Author : z00179060 Modification : Ӻ *******************************************************************************/ int uvpPopen(const char* pszCmd, char* pszBuffer, int size) { int nRet = 0; FILE *fp = NULL; int pfd[2] = {0}; pid_t pid = 0; int stat = 0; fd_set rfds; struct timeval tv; /* selectֵ */ int retval; /*μ*/ if((pszCmd == NULL) ||(pszBuffer == NULL) ||(0 == strlen(pszCmd))) { return ERROR_PARAMETER; } (void)memset_s(pszBuffer, size, 0, size); /* pipeʧ */ if (0 > pipe(pfd)) { return ERROR_PIPE; } if (0 > (pid = fork())) { return ERROR_FORK; } else if (0 == pid) { /* ӽ */ (void)close(pfd[0]); if (pfd[1] != STDOUT_FILENO) { (void) dup2(pfd[1], STDOUT_FILENO); (void)close(pfd[1]); } (void) execl("/bin/sh", "sh", "-c", pszCmd, NULL); _exit(127); } /* */ (void)close(pfd[1]); if (NULL == (fp = fdopen(pfd[0], "r"))) { (void) kill(pid, SIGKILL); (void) waitpid(pid, &stat, 0); return ERROR_FDOPEN; } /* Watch pfd[0] to see when it has input. */ /*lint -save -e573 */ /*lint -e530 */ FD_ZERO(&rfds); FD_SET(pfd[0], &rfds); /*lint +e530 */ /*lint -restore -e573 */ /* Wait up to POPEN_TIMEOUT seconds. 
*/ tv.tv_sec = POPEN_TIMEOUT; tv.tv_usec = 0; retval = select(pfd[0] + 1, &rfds, NULL, NULL, &tv); if (-1 == retval) { /* select()ִʧ */ (void) kill(pid, SIGKILL); nRet = ERROR_SELECT; } else if (retval) { /* ӽ̵ı׼дַ,ֻһ */ (void) fgets(pszBuffer, size, fp); } else { /* No data within POPEN_TIMEOUT seconds */ (void) kill(pid, SIGKILL); nRet = ERROR_TIMEOUT; } (void) fclose(fp); fp = NULL; while (0 > waitpid(pid, &stat, 0)) { if (EINTR != errno) { return ERROR_WAITPID; } } /* õĽű ֵ0-127 */ if (WIFEXITED(stat)) { nRet = WEXITSTATUS(stat); } return nRet; } /***************************************************************************** Function : uvp_sleep Description: sleep 1 seconds Input : None Output : None Return : None *****************************************************************************/ void uvp_sleep() { (void)sleep(1); return; } /***************************************************************************** Function : IsSupportCpuHotplug Description: get os info to detect if os support cpu hotplug Input : None Output : None Return : TRUE or FALSE *****************************************************************************/ int IsSupportCpuHotplug(void) { int ret = 0; char *pszHotplugFlagScript = "uname_str=`uname -r`;" "cpu_hotplug_conf=`grep 'CONFIG_HOTPLUG_CPU' /boot/config-$uname_str 2>/dev/null | awk -F= '{print $2}'`;" "printf $cpu_hotplug_conf 2>/dev/null ;"; char pszHotplugFlag[1024] = {0}; ret = uvpPopen(pszHotplugFlagScript, pszHotplugFlag, 1024); if (0 != ret) { ERR_LOG("Failed to call uvpPopen, ret=%d.", ret); return ret; } if (0 == strcmp("y", pszHotplugFlag)) { return XEN_SUCC; } else { INFO_LOG("This OS is not supported cpu hotplug."); return XEN_FAIL; } } /***************************************************************************** Function : GetSupportMaxnumCpu Description: cpu hotplug support the maxnum cpu Input : None Output : None Return : return the maxnum *****************************************************************************/ int GetSupportMaxnumCpu(void) { int ret = 0; int cpu_nr = 0; char *pszSysCpuNumScript = "uname_str=`uname -r`;" "syscpu_enable_num=`grep 'CONFIG_NR_CPUS' /boot/config-$uname_str 2>/dev/null | awk -F= '{print($2)}'`;" "printf $syscpu_enable_num 2>/dev/null ;"; char pszSysCpuNum[1024] = {0}; ret = uvpPopen(pszSysCpuNumScript, pszSysCpuNum, 1024); if (0 != ret) { ERR_LOG("Failed to call uvpPopen, ret=%d.", ret); return ret; } cpu_nr = strtoul(pszSysCpuNum, NULL, 10); /*lint -e648 */ if (ULONG_MAX == cpu_nr) { return -ERANGE; } /*lint +e648 */ else if (cpu_nr <= CPU_NR_MAX) { return cpu_nr; } else { return CPU_NR_MAX; } } /***************************************************************************** Function : SetCpuHotplugFeature Description: write Cpu hotplug token Input : handle -- xenbus file handle Output : None Return : return OS support or not support cpu hotplug *****************************************************************************/ int SetCpuHotplugFeature(void * phandle) { int cpu_hotplug_status = XEN_FAIL; /* enter OS has been supported cpu hotplug */ cpu_hotplug_status = IsSupportCpuHotplug(); /* set CpuHotplug Feature flag */ (void)write_to_xenstore(phandle, CPU_HOTPLUG_FEATURE, \ cpu_hotplug_status == XEN_SUCC ? 
"1" : "0"); return cpu_hotplug_status; } /***************************************************************************** Function : cpuhotplug_regwatch Description: Register Cpu hotplug watch Input : handle -- xenbus file handle Output : None Return : None *****************************************************************************/ void cpuhotplug_regwatch(void * phandle) { (void)regwatch(phandle, CPU_HOTPLUG_SIGNAL , ""); return; } /***************************************************************************** Function : do_cpu_online Description: update cpu to online and wrtie state into xenstore Input : handle -- xenbus file handle Output : None Return : XEN_SUCC or XEN_ERROR *****************************************************************************/ int DoCpuHotplug(void * phandle) { char pszBuff[1024] = {0}; char pszCommand[1024] = {0}; int i = 0; int cpu_enable_num = 0; int cpu_nr = 0; char *cpu_online = NULL; int idelay = 0; int ret = -1; int rc = -1; cpu_nr = GetSupportMaxnumCpu(); if (cpu_nr <= 0) { INFO_LOG("This OS has unexpectable cpus: %d.", cpu_nr); goto out; } INFO_LOG("This OS has cpu hotplug and less than %d.", cpu_nr); /* obtain cpu numbers */ for(i = 0; i <= cpu_nr - 1; i++) { (void)snprintf_s(pszCommand, 1024, 1024, "cpu/%d/availability", i); cpu_online = read_from_xenstore(phandle, pszCommand); if((NULL != cpu_online) && (0 == strcmp(cpu_online, "online"))) { cpu_enable_num ++; } free(cpu_online); cpu_online = NULL; } i = 1; /* loop for upgrade cpu online */ while (i < cpu_enable_num ) { if (idelay >= 300) { i ++; idelay = 0; continue; } (void)memset_s(pszCommand, 1024, 0, 1024); (void)memset_s(pszBuff, 1024, 0, 1024); (void)snprintf_s(pszCommand, 1024, 1024, "if [ -d \"/sys/devices/system/cpu/cpu%d\" ]; then \n printf \"online\" \nfi", i); ret = uvpPopen(pszCommand, pszBuff, 1024); if (0 != ret) { ERR_LOG("Failed to call uvpPopen, ret=%d.", ret); idelay++; uvp_sleep(); continue; } if (0 != strcmp("online", pszBuff)) { idelay++; uvp_sleep(); continue; } /* жcpuǰǷonline */ (void)memset_s(pszCommand, 1024, 0, 1024); (void)memset_s(pszBuff, 1024, 0, 1024); (void)snprintf_s(pszCommand, 1024, 1024, "cat /sys/devices/system/cpu/cpu%d/online", i); ret = uvpPopen(pszCommand, pszBuff, 1024); if (0 != ret) { ERR_LOG("Failed to call uvpPopen, ret=%d.", ret); idelay++; uvp_sleep(); continue; } if ('1' == pszBuff[0]) { INFO_LOG("Cpu%d is always online.", i); i++; idelay = 0; continue; } (void)memset_s(pszCommand, 1024, 0, 1024); (void)memset_s(pszBuff, 1024, 0, 1024); (void)snprintf_s(pszCommand, 1024, 1024, "echo 1 > /sys/devices/system/cpu/cpu%d/online", i); ret = uvpPopen(pszCommand, pszBuff, 1024); if (0 != ret) { ERR_LOG("Failed to call uvpPopen, ret=%d.", ret); idelay++; uvp_sleep(); continue; } idelay = 0; i++; } rc = XEN_SUCC; out: (void)write_to_xenstore(phandle, CPU_HOTPLUG_STATE, "0"); (void)write_to_xenstore(phandle, CPU_HOTPLUG_SIGNAL, "0"); return rc; } UVP-Tools-2.2.0.316/uvp-monitor/cpuinfo.c000066400000000000000000000333631314037446600177210ustar00rootroot00000000000000/* * Obtains the CPU usage, and writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "libxenctl.h" #include "public_common.h" #include #include "securec.h" #define NCPUSTATES 9 #define MAXCPUNUM 64 #define BUFFSIZE 2048 #define TIMEBUFSIZE 128 #define IDLE_USAGE 3 #define CPU_STATE_FILE "/proc/stat" #define CPU_INFO_SIZE 10 /*record the information of CPU in form of "%d:%.2f;", since there max number of cpu is 64, the longest size of the information for each CPU is 8bytes*/ #define CPU_USAGE_SIZE 32 typedef unsigned long long TIC_t; typedef long long SIC_t; #define TRIMz(x) ((tz = (SIC_t)(x)) < 0 ? 
0 : tz) typedef struct CPU_t { TIC_t u_current, n_current, s_current, i_current, w_current, x_current, y_current, z_current; // as represented in /proc/stat TIC_t u_save, s_save, n_save, i_save, w_save, x_save, y_save, z_save; } CPU_t; static int CpuTimeFirstFlag=0; struct cpu_data { long cp_new[NCPUSTATES]; long cp_old[NCPUSTATES]; long cp_diff[NCPUSTATES]; int cpu_usage[NCPUSTATES]; }; /***************************************************************************** Function : skip_token Description: skip the string, delete the "cpu" head Input : pointer to character Output : None Return : pointer to character *****************************************************************************/ char *cpu_skip_token(const char *pString) { /*coverity warning *pString Ϊ'\0' ж*/ while (*pString && isspace((unsigned char)(*pString))) { pString++; } while (*pString && !isspace((unsigned char)(*pString))) { pString++; } return (char *)pString; } /***************************************************************************** Function : OpenStatFile Description: Open the /proc/stat file Input : None Output : None Return : success : return pointer to file, fail : return NULL *****************************************************************************/ FILE *OpenStatFile() { FILE *pstat = NULL; if(NULL == (pstat = fopen(CPU_STATE_FILE, "r"))) { // LogPrint("open file /proc/stat error\n"); return NULL; } else { return pstat; } } /***************************************************************************** Function : GetCPUCount Description: get the CPU Count Input : None Output : None Return : Count [1~32] *****************************************************************************/ int GetCPUCount() { char buf[BUFFSIZE]; int check_char; int count = 0; FILE *pFile = NULL; if(NULL == (pFile = OpenStatFile())) { return 0; } while(EOF != (check_char = fgetc(pFile))) { if (('c' == check_char) && ('p' == fgetc(pFile))) { (void)fseek(pFile, -2, SEEK_CUR); if(NULL != fgets(buf, BUFFSIZE, pFile)) { count++; } } else { break; } } if (1 >= count) { count = 1; } else if (MAXCPUNUM < count) { count = MAXCPUNUM + 1; } count--; fclose(pFile); //lint -save -e438 pFile = NULL; return count; //lint -restore } /***************************************************************************** Function : percentages Description: Calculates the CPU usage percentages Input : cpu states count, pointer to cpu_usage struct Output : None Return : *****************************************************************************/ long percentages(int StateCount, struct cpu_data *pCpuTmp) { int i = 0; long change = 0; long total_change = 0; int *usage = NULL; long *new = NULL; long *old = NULL; long *diffs = NULL; /* initialization */ usage = pCpuTmp->cpu_usage; new = pCpuTmp->cp_new; old = pCpuTmp->cp_old; diffs = pCpuTmp->cp_diff; /* calculate changes for each state and the overall change */ for (i = 0; i < StateCount; i++) { if ((change = *new - *old) < 0) { /* this only happens when the counter wraps */ change = *old - *new; } total_change += (*diffs++ = change); *old++ = *new++; } /* avoid divide by zero potential */ if (0 == total_change) { total_change = 1; } /* calculate percentages based on overall change, rounding up */ diffs = pCpuTmp->cp_diff; for (i = 0; i < StateCount; i++) { *usage++ = (int)((*diffs++ * 1000) / total_change); } /* return the total in case the caller wants to use it */ return(total_change); } /***************************************************************************** Function : pGetCPUUsage Description: get 
the CPU usage Input :None Output : CPU usage string Return : success : return CPU usage string, fail : return ERR_STR *****************************************************************************/ char *pGetCPUUsage(char *pResult) { int i = 0; int j = 0; int flg = 0; int cpucount = 0; int shiftsize = 0; char *pTmpString = NULL; char *pResultTmp = NULL; FILE *pFileStat = NULL; char CpuInfoTmp[CPU_INFO_SIZE*4]; errno_t rc = 0; char BufTmp[BUFFSIZE]; float fCpuUsage[MAXCPUNUM]; struct cpu_data CpuRecord[MAXCPUNUM]; for(i = 0; i < MAXCPUNUM; i ++) { fCpuUsage[i] = 0.0; } (void)memset_s(BufTmp, BUFFSIZE, 0, BUFFSIZE); (void)memset_s(CpuRecord, MAXCPUNUM, 0, MAXCPUNUM * sizeof(struct cpu_data)); cpucount = GetCPUCount(); while(1) { pFileStat = OpenStatFile(); if(NULL == pFileStat) { return ERR_STR; } if(NULL == fgets(BufTmp, BUFFSIZE, pFileStat)) { fclose(pFileStat); return ERR_STR; } for(i = 0; i < cpucount; i++) { if(NULL == fgets(BufTmp, BUFFSIZE, pFileStat)) { fclose(pFileStat); return ERR_STR; } pTmpString = cpu_skip_token(BufTmp); /* skip "cpu" */ for(j = 0; j < NCPUSTATES; j ++) { CpuRecord[i].cp_new[j] = strtoul(pTmpString, &pTmpString, 0); } (void)percentages(NCPUSTATES, &CpuRecord[i]); fCpuUsage[i] = 0.0; for(j = 0; j < NCPUSTATES ; j ++) { fCpuUsage[i] = fCpuUsage[i] + CpuRecord[i].cpu_usage[j] / 10.0; } fCpuUsage[i] = fCpuUsage[i] - CpuRecord[i].cpu_usage[IDLE_USAGE] / 10.0; if( fCpuUsage[i] < 0) { fCpuUsage[i] = abs(fCpuUsage[i]); } } fclose(pFileStat); pFileStat = NULL; if (0 != flg) { break; } (void)usleep(500000); flg++; }/* End of while(1) */ pResultTmp = pResult; for (i = 0; i < cpucount; i++) { memset_s(CpuInfoTmp,sizeof(CpuInfoTmp),0,sizeof(CpuInfoTmp)); shiftsize = snprintf_s(CpuInfoTmp,sizeof(CpuInfoTmp), sizeof(CpuInfoTmp), "%d:%.2f;", i, fCpuUsage[i]); if(-1 == shiftsize) { return ERR_STR; } rc = memcpy_s(pResultTmp, strlen(CpuInfoTmp), CpuInfoTmp, strlen(CpuInfoTmp)); if(rc != 0 ) { /* hand erro*/ return ERR_STR; } pResultTmp = pResultTmp + shiftsize; } (void)snprintf_s(pResultTmp, 1, 1, '\0'); return SUCC; } /***************************************************************************** Function : CpuTimeWaitPercentage Description: get cpu wait time percentage on usr task, sys task, wait task Input : cputimevalue Output : Return : -1: fail 0: success *****************************************************************************/ int CpuTimeWaitPercentage(char *cputimevalue) { SIC_t u_frme, s_frme, n_frme, i_frme, w_frme, x_frme, y_frme, z_frme, tot_frme, tz; FILE *fp = NULL; char *CpuTimeValue = NULL; char buf[TIMEBUFSIZE] = {0}; float scale; static CPU_t cpus; if (!(fp = OpenStatFile())) { DEBUG_LOG("Failed open /proc/stat, errno=%d.", errno); return ERROR; } if (!fgets(buf, sizeof(buf), fp)) { fclose(fp); DEBUG_LOG("/proc/stat content is NULL."); return ERROR; } cpus.x_current = 0; // FIXME: can't tell by kernel version number cpus.y_current = 0; // FIXME: can't tell by kernel version number cpus.z_current = 0; // FIXME: can't tell by kernel version number /*ÿеݸֵǰ*/ (void)sscanf(buf, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",&cpus.u_current,&cpus.n_current,&cpus.s_current,&cpus.i_current,&cpus.w_current,&cpus.x_current,&cpus.y_current,&cpus.z_current); /*һζȡĻǰǰֵ*/ if(0 == CpuTimeFirstFlag) { cpus.u_save = cpus.u_current; cpus.s_save = cpus.s_current; cpus.n_save = cpus.n_current; cpus.i_save = cpus.i_current; cpus.w_save = cpus.w_current; cpus.x_save = cpus.x_current; cpus.y_save = cpus.y_current; cpus.z_save = cpus.z_current; CpuTimeFirstFlag = 1; } /*ǰǰֵñ仯ֵ*/ 
u_frme = cpus.u_current - cpus.u_save; s_frme = cpus.s_current - cpus.s_save; n_frme = cpus.n_current - cpus.n_save; i_frme = TRIMz(cpus.i_current - cpus.i_save); w_frme = cpus.w_current - cpus.w_save; x_frme = cpus.x_current - cpus.x_save; y_frme = cpus.y_current - cpus.y_save; z_frme = cpus.z_current - cpus.z_save; /*ܵı仯*/ tot_frme = u_frme + s_frme + n_frme + i_frme + w_frme + x_frme + y_frme + z_frme; if (tot_frme < 1) { tot_frme = 1; } scale = 100.0 / (float)tot_frme; CpuTimeValue = cputimevalue; /*ٷֱȣʽʽַ*/ (void)snprintf_s(CpuTimeValue, CPU_USAGE_SIZE, CPU_USAGE_SIZE, "%.2f:%.2f:%.2f", (float)u_frme * scale, (float)s_frme * scale, (float)w_frme * scale); /*ǰֵǰֵ*/ cpus.u_save = cpus.u_current; cpus.s_save = cpus.s_current; cpus.n_save = cpus.n_current; cpus.i_save = cpus.i_current; cpus.w_save = cpus.w_current; cpus.x_save = cpus.x_current; cpus.y_save = cpus.y_current; cpus.z_save = cpus.z_current; fclose(fp); return SUCC; } /***************************************************************************** Function : cpuworkctlmon Description: Input :handle : handle of xenstore Output : None Return : None *****************************************************************************/ int cpuworkctlmon(struct xs_handle *handle) { char *value = NULL; char *cputimevalue = NULL; int CpuUsageFlag = 0; value = (char *)malloc(MAXCPUNUM * CPU_INFO_SIZE + 1); if(NULL == value) { return ERROR; } (void)memset_s(value, MAXCPUNUM * CPU_INFO_SIZE + 1, 0, MAXCPUNUM * CPU_INFO_SIZE + 1); cputimevalue = (char *)malloc(CPU_USAGE_SIZE); if(NULL == cputimevalue) { if(value) { free(value); value = NULL; } return ERROR; } (void)memset_s(cputimevalue, CPU_USAGE_SIZE, 0, CPU_USAGE_SIZE); (void)pGetCPUUsage(value); if(g_exinfo_flag_value & EXINFO_FLAG_CPU_USAGE) { CpuUsageFlag = CpuTimeWaitPercentage(cputimevalue); } if(xb_write_first_flag == 0) { write_to_xenstore(handle, CPU_DATA_PATH, value); if(g_exinfo_flag_value & EXINFO_FLAG_CPU_USAGE) { if(SUCC == CpuUsageFlag) { write_to_xenstore(handle, CPU_TIME_PATH, cputimevalue); } else { write_to_xenstore(handle, CPU_TIME_PATH, "error"); } } } else { write_weak_to_xenstore(handle, CPU_DATA_PATH, value); if(g_exinfo_flag_value & EXINFO_FLAG_CPU_USAGE) { if(SUCC == CpuUsageFlag) { write_weak_to_xenstore(handle, CPU_TIME_PATH, cputimevalue); } else { write_weak_to_xenstore(handle, CPU_TIME_PATH, "error"); } } } free(value); free(cputimevalue); //lint -save -e438 value = NULL; cputimevalue = NULL; return SUCC; //lint -save -e438 } UVP-Tools-2.2.0.316/uvp-monitor/disk.c000066400000000000000000002243011314037446600172020ustar00rootroot00000000000000/* * Obtains disks capacity and usage, and writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "libxenctl.h" #include "public_common.h" #include #include #include #include #include #include #include #include "securec.h" #define PROC_PARTITIONS "/proc/partitions" #define PROC_SWAPS "/proc/swaps" #define PROC_MOUNTS "/proc/mounts" #define PROC_DEVICES "/proc/devices" #define DISK_DATA_PATH "control/uvp/disk" #define DISK_DATA_EXT_PATH "control/uvp/disk-ext" #define FILE_DATA_PATH "control/uvp/filesystem" /*xenstore֧4096ֽڣƺֵٽfilesystemϢд*/ #define FILE_DATA_EXTRA_PATH_PREFIX "control/uvp/filesystem_extra" /*filesystemֵĿ*/ #define FILE_NUM_PATH "control/uvp/filesystem_extra_num" #define DEVPATH "/dev/disk/by-id/" #define CMPSTRING "../../" #define SCSI_DISK_PATH "device/vscsi" /*must be equal with the value defined in uvpext*/ #define MAX_ROWS 4 #define MAX_XS_LENGTH 100 /*ÿxenstoreֵдĴʸ*/ #define MAX_DISKUSAGE_NUM_PER_KEY 30 #define RAM_MAJOR 1 #define LOOP_MAJOR 7 #define MAX_DISKUSAGE_LEN 1024 #define MAX_NAME_LEN 64 #define MAX_STR_LEN 4096 #define UNIT_TRANSFER_CYCLE 1024 #define MAX_DISK_NUMBER 128 /* TODO:1.ֽ׶֧11xvdạ17scsịԺֵ֧Ĵ˴ӦӦ. 2. 
Ϊ֧60ֵ̣Ϊ128*/ /*֧50filesystem*/ #define MAX_FILENAMES_SIZE 52800 /*xenstoreֵ󳤶Ϊ4096ȥֵȣʣ³Ϊfilesystemɷŵij*/ #define MAX_FILENAMES_XENSTORLEN 4042 /*ֻϱ60̵Ϣ*/ #define MAX_DISKUSAGE_STRING_NUM 60 #define SECTOR_SIZE 512 #define MEGATOBYTE (1024 * 1024) /*FilenamesArrװļϵͳʹ*/ char FilenameArr[MAX_FILENAMES_SIZE] = {0}; /* device-mapperӦ豸 */ int g_deviceMapperNum = 0; /* 豸Ƽ豸Žṹ壨/proc/partitions */ struct DevMajorMinor { int devMajor; /* 豸豸 */ int devMinor; /* 豸豸 */ char devBlockSize[MAX_STR_LEN]; /* 豸ĿС(KB) */ char devName[MAX_NAME_LEN]; /* 豸 */ }; /* Ƽʹÿռ䡢ܿռĽṹ */ struct DeviceInfo { char phyDevName[MAX_NAME_LEN]; /* */ char deviceTotalSpace[MAX_STR_LEN]; /* ̵ܿռ(MB) */ char diskUsage[MAX_DISKUSAGE_LEN]; /* ̵ʹÿռ(MB) */ }; /* صӦļϵͳϢʹÿռĽṹ */ struct DiskInfo { int st_rdev; /* 洢ļϵͳӦ豸ID */ char filesystem[MAX_STR_LEN]; /* ļϵͳӦFileSystem */ char usage[MAX_DISKUSAGE_LEN]; /* ļϵͳʹÿռ(MB) */ char mountPoint[MAX_STR_LEN]; /* ļϵͳĹص */ }; /* ¼ǰṹʵʴ洢ĸ */ struct NumberCount { int partNum; /* ¼DevMajorMinor */ int diskNum; /* ¼DeviceInfo */ int mountNum; /* ¼DiskInfo */ }; struct DmInfo { int nParentMajor; int nParentMinor; long long nSectorNum; }; enum dm_type { DM_LINEAR = 0, DM_STRIPED, DM_CRYPT, DM_RAID, DM_MIRROR, DM_SNAPSHOT, DM_UNKNOWN }; extern CheckName(const char * name); void write_xs_disk(struct xs_handle *handle, int is_weak, char xs_value[][MAX_DISKUSAGE_LEN], int row_num); /***************************************************************************** Function : strAdd() Description: ַӺ Input : inputNum1 һַ inputNum2 ڶַ Output : outputResult ַ͵ַ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int strAdd(char *inputNum1, char *inputNum2, char *outputResult) { char *pszFirstNum = inputNum1; char *pszSecondNum = inputNum2; char szTmpResult[MAX_STR_LEN] = {0}; int nFirstNumLen; int nSecondNumLen; int nTmpStrLen; int nNum1 = 0; int nNum2 = 0; int sum = 0; int ten = 0; int i; int j = 0; int k = 0; /* һΪNULLش */ if (NULL == pszFirstNum || NULL == pszSecondNum || NULL == outputResult) { return ERROR; } nFirstNumLen = strlen(pszFirstNum); nSecondNumLen = strlen(pszSecondNum); /* нϳһݵij */ nTmpStrLen = (nFirstNumLen < nSecondNumLen) ? nSecondNumLen : nFirstNumLen; if (MAX_STR_LEN <= nTmpStrLen) { return ERROR; } /* ѭȡÿһλ */ for (i = 0; i < nTmpStrLen; i++) { /* ӦݳСѭλΪ0ֱӻȡλݣҽASCIIתΪ */ j = nFirstNumLen - i - 1; k = nSecondNumLen - i - 1; (j >= 0) ? (nNum1 = pszFirstNum[j] - 48) : (nNum1 = 0); (k >= 0) ? 
(nNum2 = pszSecondNum[k] - 48) : (nNum2 = 0); /* ʱ־λĺͣȡλʮλ */ sum = nNum1 + nNum2 + ten; ten = sum / 10; sum = sum % 10; /* Ӧĺ͵ĸλָʱַ */ *(szTmpResult + i) = sum + 48; } /* ˳ѭλ־1ٽλ1ʱַ */ if (1 == ten) { *(szTmpResult + i) = ten + 48; } /* ȡʱַijȣ䰴Ƹ */ nTmpStrLen = strlen(szTmpResult); for (i = 0; i < nTmpStrLen; i++) { outputResult[i] = szTmpResult[nTmpStrLen - i - 1]; } /* ֶַĩβ\0ֹظʹô */ outputResult[i] = '\0'; return SUCC; } /***************************************************************************** Function : strMinus() Description: ַ Input : inputNum1 inputNum2 Output : outputResult ַַ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int strMinus(char *inputNum1, char *inputNum2, char *outputResult) { char *pszFirstNum = inputNum1; char *pszSecondNum = inputNum2; char szTmpResult[MAX_STR_LEN] = {0}; int nFirstNumLen; int nSecondNumLen; int nTmpStrLen; int nNum1 = 0; int nNum2 = 0; /* ÿλŵʱ */ int wanting = 0; /* ǽλ */ int isNegative = 0; /* Žַ± */ int nTmpLocation = 0; int i; /* һΪNULLش */ if (NULL == pszFirstNum || NULL == pszSecondNum || NULL == outputResult) { return ERROR; } nFirstNumLen = strlen(pszFirstNum); nSecondNumLen = strlen(pszSecondNum); /* ijȴڱij(Ϊ)ش */ if (nFirstNumLen < nSecondNumLen) { return ERROR; } /* ѭȡÿһλݣȻжӦݲ */ for (i = 0; i < nSecondNumLen; i++) { /* ȡӦλϵ */ nNum1 = pszFirstNum[nFirstNumLen - i - 1] - 48; nNum2 = pszSecondNum[nSecondNumLen - i - 1] - 48; /* жϱӦݱλǷڼĶӦСڣҪһλλ򣬲Ҫ */ if (nNum1 >= nNum2 + isNegative) { wanting = nNum1 - nNum2 - isNegative; isNegative = 0; } else { wanting = nNum1 + 10 - nNum2 - isNegative; isNegative = 1; } /* Ӧλֵַʱַ */ *(szTmpResult + i) = wanting + 48; } /* ѭֱijΪֹ */ for (; i < nFirstNumLen; i++) { nNum1 = pszFirstNum[nFirstNumLen - i - 1] - 48; /* λ־0Ӧ㣬ֱӽǰλݴݸ */ if (1 == isNegative) { /* ǰλõֵС1ֱһλλ */ if (nNum1 >= 1) { wanting = nNum1 - isNegative; *(szTmpResult + i) = wanting + 48; isNegative = 0; } else { wanting = nNum1 + 10 - isNegative; *(szTmpResult + i) = wanting + 48; isNegative = 1; } } else { *(szTmpResult + i) = nNum1 + 48; } } /* 걻λλǻǴ0򷵻ش */ if (1 == isNegative) { return ERROR; } /* ȡʱַijȣ䰴Ƹ */ nTmpStrLen = strlen(szTmpResult); for (i = 0; i < nTmpStrLen; i++) { /* ȥλ0 */ if (0 == strlen(outputResult) && '0' == szTmpResult[nTmpStrLen - i - 1]) { continue; } outputResult[nTmpLocation] = szTmpResult[nTmpStrLen - i - 1]; nTmpLocation++; } /* ַΪ0θֵһ0 */ if (0 == strlen(outputResult)) { (void)strncpy_s(outputResult, 3, "0", 2); } return SUCC; } /***************************************************************************** Function : strMulti() Description: ַ˺ Input : inputNum1 һַ inputNum1 ڶַ Output : outputResult ַַ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int strMulti(char *inputNum1, char *inputNum2, char *outputResult) { char *pszFirstNum = inputNum1; char *pszSecondNum = inputNum2; char szTmpResult[MAX_STR_LEN] = {0}; char szOneTimeResult[MAX_STR_LEN] = {0}; char szFinalResult[MAX_STR_LEN] = {0}; int nFirstNumLen; int nSecondNumLen; int nTmpStrLen = 0; int nNum1 = 0; int nNum2 = 0; /* Žλʱ */ int carry = 0; int nTempResult = 0; int nLen; int i; int j = 0; /* һΪNULLش */ if (NULL == pszFirstNum || NULL == pszSecondNum || NULL == outputResult) { return ERROR; } nFirstNumLen = strlen(pszFirstNum); nSecondNumLen = strlen(pszSecondNum); /* ѭһַÿһλһλʼ */ for (i = nFirstNumLen; i > 0; i--) { /* ȡӦλõ */ nNum1 = *(pszFirstNum + i - 1) - 48; /* ѭһַÿһλһλʼ */ for 
(j = nSecondNumLen; j > 0; j--) { /* ȡӦλõ */ nNum2 = *(pszSecondNum + j - 1) - 48; /* ȡ˺λ */ nTempResult = nNum1 * nNum2 + carry; /* ֱȡλββݸʱַ */ carry = nTempResult / 10; nTempResult = nTempResult % 10; *(szTmpResult + nSecondNumLen - j) = nTempResult + 48; } /* ɺ󣬽λԲΪ0򽫽λݸʱַ */ if (0 != carry) { *(szTmpResult + nSecondNumLen) = carry + 48; carry = 0; } /* ȡʱַijȣ䰴ƴݸִеʱַ */ nTmpStrLen = strlen(szTmpResult); for (j = 0; j < nTmpStrLen; j++) { szOneTimeResult[j] = szTmpResult[nTmpStrLen - j - 1]; } /* ÿνĵλ0Թ˺ַӦλ */ for (j = 0; j < nFirstNumLen - i; j++) { (void)strncat_s(szOneTimeResult, MAX_STR_LEN, "0", 2); } /* ִн˽ۼ */ (void)strAdd(szOneTimeResult, szFinalResult, szFinalResult); /* ʱַ͵νַ */ memset_s(szTmpResult, MAX_STR_LEN, 0, sizeof(szTmpResult)); memset_s(szOneTimeResult, MAX_STR_LEN, 0, sizeof(szOneTimeResult)); } /* ַɣսݸ */ nLen = strlen(szFinalResult); /* ַΪ0ʱ˵Ϊ0 */ if ('0' == szFinalResult[0]) { nLen = 1; } (void)strncpy_s(outputResult, nLen + 1, szFinalResult, nLen); outputResult[nLen] = '\0'; return SUCC; } /***************************************************************************** Function : unitTransfer() Description: λתֽںKBתΪMBʹõǽλ Input : sourceStr ΣֽڴС/KBС level Σλı Output : targetStr Σڴ洢 Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int unitTransfer(char *sourceStr, unsigned long level, char *targetStr) { char *pszFirstNum = sourceStr; char szResult[MAX_STR_LEN] = {0}; unsigned long nCycle = level; int nNumLen; unsigned long tmpNum = 0; /* ̳ʼΪ0 */ int divide = 0; /* ʼΪ0 */ unsigned long remainder = 0; int nLen; int i; /* ̵ַ± */ int j = 0; /* жַǷΪNULL߳Ϊ0򷵻ش */ if (NULL == pszFirstNum || NULL == targetStr || 0 == nCycle) { return ERROR; } nNumLen = strlen(pszFirstNum); /* ѭȡÿһλ */ for (i = 0; i < nNumLen; i++) { /* λȡֵַ */ tmpNum = remainder * 10 + (pszFirstNum[i] - 48); /* жʱֵǷڳ̵ַǷΪգСΪգʱֵַ */ if(tmpNum < nCycle && 0 == strlen(szResult)) { remainder = tmpNum; } else { /* ʱݴڳʼʽļ㣬̵ʱݳԳʱģ */ divide = tmpNum / nCycle; remainder = tmpNum % nCycle; /* ̸ֵӦĽ */ *(szResult + j) = divide + 48; j++; } } /* ַȫȡϣûдڳ򽫽Ϊ0 */ if(0 == j) { *(szResult + j) = divide + 48; } /* ִ֮Ϊ0λ(+1) */ if (0 != remainder) { (void)strAdd(szResult, "1", szResult); } /* սַƸ */ nLen = strlen(szResult); if (MAX_STR_LEN <= nLen) { nLen = MAX_STR_LEN - 1; } (void)strncpy_s(targetStr, nLen + 1, szResult, nLen); targetStr[nLen] = '\0'; return SUCC; } /***************************************************************************** Function : openPipe() Description: popenзװUT Input : command popenΣҪִе Output : type popenΣģʽ Return : FILE* ļָ Other : N/A *****************************************************************************/ FILE *openPipe(const char *pszCommand, const char *pszType) { return popen(pszCommand, pszType); } /***************************************************************************** Function : openFile() Description: fopenзװUT Input : filepath fopenΣļ· Output : type fopenΣģʽ Return : FILE* ļָ Other : N/A *****************************************************************************/ FILE *openFile(const char *pszFilePath, const char *pszType) { return fopen(pszFilePath, pszType); } /***************************************************************************** Function : getDeviceMapperNumber() Description: Device-Mapper͵豸 Input : N/A Output : N/A Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getDeviceMapperNumber() { FILE 
*fpDevices; char szLine[MAX_STR_LEN] = {0}; const char *pszDeviceMapperName = "device-mapper"; char szMajor[MAX_NAME_LEN] = {0}; char szDeviceType[MAX_STR_LEN] = {0}; /* /proc/devicesļ */ fpDevices = openFile(PROC_DEVICES, "r"); if (NULL == fpDevices) { return ERROR; } while (fgets(szLine, sizeof(szLine), fpDevices)) { /* ȡ豸ͺͶӦ豸 */ if (sscanf_s(szLine, "%s %[^\n ]", szMajor, sizeof(szMajor), szDeviceType, sizeof(szDeviceType)) != 2) { continue; } /* ȶǷΪdevice-mapper߼ */ if (0 == strcmp(szDeviceType, pszDeviceMapperName)) { g_deviceMapperNum = atoi(szMajor); break; } } (void)fclose(fpDevices); //lint -save -e438 fpDevices = NULL; return SUCC; //lint -save -e438 } /***************************************************************************** Function : freePath() Description: ͷָ뵽Ŀռ Input : char *path1 char *path2 char *path3 Output : N/A Return : void Other : N/A *****************************************************************************/ void freePath(char *path1, char *path2, char *path3) { if (path1 != NULL) { free(path1); } if (path2 != NULL) { free(path2); } if (path3 != NULL) { free(path3); } } /***************************************************************************** Function : startsWith() Description: жַstrǷַprefixʼ Input : str prefix Output : N/A Return : 1: ַstrַprefixʼ 0: ַstrַprefixʼ Other : N/A History : 2013.09.02, h00227765 created this function. *****************************************************************************/ static int startsWith(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } /***************************************************************************** Function : getScsiBackendPath() Description: öӦxenstore scsi豸· Input : struct xs_handle* handle handle of xenstore dir ǰxenstoreĿ¼ Output : N/A Return : NULL = success, NULL = failure Other : N/A History : 2013.09.02, h00227765 created this function. *****************************************************************************/ static char* getScsiBackendPath(struct xs_handle *handle, char *dir) { char szXsPath[MAX_STR_LEN]; int ret; /* 磺ȡdevice/vscsi/2048/backendúxenstore· */ ret = snprintf_s(szXsPath, sizeof(szXsPath), sizeof(szXsPath), "%s/%s/%s", SCSI_DISK_PATH, dir, "backend"); if (ret < 0) { return NULL; } return read_from_xenstore(handle, szXsPath); } /***************************************************************************** Function : getScsiBackendParams() Description: öӦxenstore scsi豸·params Input : struct xs_handle* handle handle of xenstore bePath xenstore· Output : N/A Return : NULL = success, NULL = failure Other : N/A History : 2013.09.02, h00227765 created this function. *****************************************************************************/ static char* getScsiBackendParams(struct xs_handle *handle, char *bePath) { char szXsPath[MAX_STR_LEN]; int ret; /* 磺ȡ˼ֵ/local/domain/0/backend/vbd/%d/2048/paramsȡ豸id */ ret = snprintf_s(szXsPath, sizeof(szXsPath), sizeof(szXsPath), "%s/%s", bePath, "params"); if (ret < 0) { return NULL; } return read_from_xenstore(handle, szXsPath); } /***************************************************************************** Function : getScsiBackendName() Description: öӦxenstore scsi豸·豸XML豸 Input : struct xs_handle* handle handle of xenstore bePath xenstore· Output : N/A Return : NULL = success, NULL = failure Other : N/A History : 2013.09.02, h00227765 created this function. 
*****************************************************************************/ static char* getScsiBackendName(struct xs_handle *handle, char *bePath) { char szXsPath[MAX_STR_LEN]; int ret; /* 磺ȡ˼ֵ/local/domain/0/backend/vbd/%d/2048/devȡxml豸 */ ret = snprintf_s(szXsPath, sizeof(szXsPath), sizeof(szXsPath), "%s/%s", bePath, "dev"); if (ret < 0) { return NULL; } return read_from_xenstore(handle, szXsPath); } /***************************************************************************** Function : getScsiDiskXmlName() Description: ͨxenstore豸, scsi豸xmlе豸 Input : struct xs_handle *handle handle of xenstore char *d_name 豸sda Output : N/A Return : NULL = success, NULL = failure Other : N/A *****************************************************************************/ static char* getScsiDiskXmlName(struct xs_handle *handle, const char *d_name) { unsigned int i; unsigned int num; char **xs_dir; char *fptr; char *bePath = NULL; char *params = NULL; char *devname = NULL; /* ȡscsi豸żscsi̸ */ xs_dir = xs_directory(handle, 0, SCSI_DISK_PATH, &num); if (xs_dir == NULL) { return NULL; } for (i = 0; i < num; i++) { /* 磺ȡdevice/vscsi/2048/backendúxenstore· */ bePath = getScsiBackendPath(handle, xs_dir[i]); if (bePath == NULL) { free(xs_dir); return NULL; } /* 磺ȡ˼ֵ/local/domain/0/backend/vbd/%d/2048/paramsȡ豸id */ params = getScsiBackendParams(handle, bePath); if (params == NULL) { free(xs_dir); freePath(bePath, NULL, NULL); return NULL; } /* 磺ȡ˼ֵ/local/domain/0/backend/vbd/%d/2048/devȡxml豸 */ fptr = params + strlen(DEVPATH); if (0 == strcmp(fptr, d_name)) { devname = getScsiBackendName(handle, bePath); free(xs_dir); freePath(bePath, params, NULL); return devname; } freePath(bePath, params, devname); } free(xs_dir); return NULL; } /***************************************************************************** Function : isCorrectDev() Description: /dev/disk/by-idµļǷҪҵ豸 Input : ptName ҵ豸 entry /dev/disk/by-idµһļ Output : N/A Return : 1: Ǵҵ豸 0: Ǵҵ豸 Other : N/A History : 2013.09.02, h00227765 created this function. *****************************************************************************/ static int isCorrectDev(const char *ptName, struct dirent *entry) { char fullPath[MAX_STR_LEN]; char actualPath[MAX_STR_LEN]; ssize_t len; /* Ŀ¼Ϊ"...wwn-" */ if (startsWith(entry->d_name, ".") || startsWith(entry->d_name, "..") || startsWith(entry->d_name, "wwn-")) { return 0; } snprintf_s(fullPath, sizeof(fullPath), sizeof(fullPath), "%s%s", DEVPATH, entry->d_name); /* * ȡidӣԵõ"../../sda" * readlink() does not append a null byte to buf. */ len = readlink(fullPath, actualPath, sizeof(actualPath) - 1); if (-1 == len) { return 0; } actualPath[len] = '\0'; /* On success, readlink() returns the number of bytes placed in buf. */ /* ǰ"../../"ôַ(sda)бȽsda */ if (0 != strcmp(ptName, actualPath + strlen(CMPSTRING))) { return 0; } return 1; } /***************************************************************************** Function : getXmlDevName() Description: scsiȡXML豸 Input : handle handle of xenstore ptName XML豸ڷ Output : N/A Return : XML豸Ҫͷ NULL = success, NULL = failure Other : N/A History : 2013.09.02, h00227765 created this function. 
*****************************************************************************/ static char* getXmlDevName(struct xs_handle *handle, const char *ptName) { char *xmlDevName = NULL; DIR *dir; struct dirent entry; struct dirent *result = NULL; int ret; dir = opendir(DEVPATH); if (NULL == dir) { return NULL; } /* /dev/disk/by-idĿ¼µļ */ for (ret = readdir_r(dir, &entry, &result); (ret == 0) && (result != NULL); ret = readdir_r(dir, &entry, &result)) { if (isCorrectDev(ptName, &entry)) { xmlDevName = getScsiDiskXmlName(handle, entry.d_name); if (xmlDevName) break; } } (void)closedir(dir); return xmlDevName; } /***************************************************************************** Function : getPartitionsInfo() Description: ȡ/proc/partition豸Լ豸Ϣ Input : struct xs_handle* handle handle of xenstore Output : struct DevMajorMinor* devMajorMinor 豸Ƽ豸ŵĽṹ int* pnPartNum struct DeviceInfo* diskUsage Ƽʹÿռ䡢ܿռĽṹ int* pnDiskNum Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getPartitionsInfo(struct xs_handle *handle, struct DevMajorMinor *devMajorMinor, int *pnPartNum, struct DeviceInfo *diskUsage, int *pnDiskNum) { FILE *fpProcPt; char szLine[MAX_STR_LEN]; char szPtName[MAX_NAME_LEN]; int nMajor = 0; int nMinor = 0; char szSize[MAX_STR_LEN]; int nPartitionNum = 0; int nDiskNum = 0; int nPtNameLen; char *xmlDevName; struct DevMajorMinor *tmpDev = NULL; struct DeviceInfo *tmpUsage = NULL; /* /proc/partitionsļ */ fpProcPt = openFile(PROC_PARTITIONS, "r"); if (NULL == fpProcPt) { return ERROR; } /* ѭȡÿһеϢ */ while (fgets(szLine, sizeof(szLine), fpProcPt)) { /* ʽȡӦ */ if (sscanf_s(szLine, " %d %d %s %[^\n ]", &nMajor, &nMinor, szSize, sizeof(szSize), szPtName, sizeof(szPtName)) != 4) { continue; } /* loopramdm豸Ϣ */ if (LOOP_MAJOR == nMajor || RAM_MAJOR == nMajor) { continue; } /* ȫݣҵdmsetup豸ʱҪѰҶӦ豸 */ tmpDev = devMajorMinor + nPartitionNum; tmpDev->devMajor = nMajor; tmpDev->devMinor = nMinor; szSize[MAX_STR_LEN - 1] = '\0'; szPtName[MAX_NAME_LEN - 1] = '\0'; (void)strncpy_s(tmpDev->devBlockSize, MAX_STR_LEN, szSize, strlen(szSize)); (void)strncpy_s(tmpDev->devName, MAX_NAME_LEN, szPtName, strlen(szPtName)); nPartitionNum++; /* ĿǰUVPֹ֧xvd*hd*sd*͵豸 */ if ('h' != szPtName[0] && 's' != szPtName[0] && 'x' != szPtName[0]) { continue; } /* ĿǰUVPֹ֧شֵ豸ͣԻȡszPtNameеһַ鿴ǷΪ֣ǣ˵Ǵ*/ nPtNameLen = strlen(szPtName); if (szPtName[nPtNameLen - 1] >= '0' && szPtName[nPtNameLen - 1] <= '9') { continue; } tmpUsage = diskUsage + nDiskNum; /* * ϢݴϢĽṹС * scsi豸޸Ϊxmlļ豸(target dev)scsi豸ڲʾΪ׼ */ if ( 's' == szPtName[0]) { xmlDevName = getXmlDevName(handle, szPtName); if (NULL == xmlDevName) { continue; } (void)strncpy_s(tmpUsage->phyDevName, sizeof(tmpUsage->phyDevName), xmlDevName, sizeof(tmpUsage->phyDevName)-1); free(xmlDevName); } else { (void)strncpy_s(tmpUsage->phyDevName, sizeof(tmpUsage->phyDevName), szPtName, sizeof(tmpUsage->phyDevName)-1); } tmpUsage->phyDevName[MAX_NAME_LEN - 1] = '\0'; /* ʼʹϢΪ0 */ (void)strncpy_s(tmpUsage->diskUsage, MAX_DISKUSAGE_LEN, "0", 2); (void)unitTransfer(szSize, UNIT_TRANSFER_CYCLE, tmpUsage->deviceTotalSpace); nDiskNum++; } *pnPartNum = nPartitionNum; *pnDiskNum = nDiskNum; (void)fclose(fpProcPt); return SUCC; } /***************************************************************************** Function : getSwapInfo() Description: ȡswapļϵͳԼӦĿռС Input : N/A Output : struct DiskInfo* diskMap swapϢʹÿռ int* pnMountNum swap Return : SUCC = success, ERROR = failure Other : N/A 
*****************************************************************************/ int getSwapInfo(struct DiskInfo *diskMap, int *pnMountNum) { FILE *fpProcSwap; int nFsSize = 0; int nSwapPartitionNum = *pnMountNum; char szLine[MAX_STR_LEN] = {0}; char szSwapName[MAX_STR_LEN] = {0}; struct DiskInfo *tmpDisk = NULL; /* /proc/swapsļ */ fpProcSwap = openFile(PROC_SWAPS, "r"); if (NULL == fpProcSwap) { return ERROR; } /* diskMapswapϢ */ while (fgets(szLine, sizeof(szLine), fpProcSwap)) { if (sscanf_s(szLine, "%s %*s %d %*[^\n ]", szSwapName, sizeof(szSwapName), &nFsSize) != 2) { continue; } tmpDisk = diskMap + nSwapPartitionNum; szSwapName[MAX_STR_LEN - 1] = '\0'; (void)strncpy_s(tmpDisk->filesystem, MAX_STR_LEN, szSwapName, strlen(szSwapName)); (void)snprintf_s(tmpDisk->usage, sizeof(tmpDisk->usage), sizeof(tmpDisk->usage), "%d", nFsSize / UNIT_TRANSFER_CYCLE); nSwapPartitionNum++; } *pnMountNum = nSwapPartitionNum; (void)fclose(fpProcSwap); //lint -save -e438 fpProcSwap = NULL; return SUCC; //lint -restore } /***************************************************************************** Function : getDrbdParent() Description: drbd豸ȡӦĸڵ Input : char* drbdName drbd豸 Output : char* parentName ڵ Return : 0 Other : N/A *****************************************************************************/ int getDrbdParent(char *drbdName, char *parentName) { char szCmd[MAX_STR_LEN] = {0}; FILE *fpDmCmd; char szLine[MAX_STR_LEN] = {0}; char tmpParentName[MAX_NAME_LEN] = {0}; if(SUCC != CheckName(drbdName)) { return ERROR; } /* ݵǰdrbd豸drbdsetupȡ豸 */ (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "drbdsetup \"%s$\" show |grep /dev", drbdName); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { return 0; } /* ȡִнļָ */ if (NULL != fgets(szLine, sizeof(szLine), fpDmCmd)) { /* drbdsetupȡ豸 */ if (sscanf_s(szLine, "%*[^\"]\"%[^\"]", tmpParentName, sizeof(tmpParentName)) != 1) { (void)pclose(fpDmCmd); return 0; } } (void)pclose(fpDmCmd); //lint -save -e438 fpDmCmd = NULL; //lint -restore memset_s(parentName, strlen(parentName), 0, strlen(parentName)); (void)strncpy_s(parentName, strlen(parentName) + 1, tmpParentName, strlen(tmpParentName)); return 0; } /***************************************************************************** Function : getMountInfo() Description: ȡ/proc/mountsʵļϵͳԼصӦϢ(ȥظtmp) Input : N/A Output : struct DiskInfo mountInfo[] صӦļϵͳϢʹÿռĽṹ int* pnMountNum صӦļϵͳϢĸ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getMountInfo(struct DiskInfo *mountInfo, int *pnMountNum) { FILE *fpProcMount; char szLine[MAX_STR_LEN] = {0}; struct DiskInfo *tmpMountInfo; struct DiskInfo *firstInfo = NULL; struct DiskInfo *secondInfo = NULL; char szFilesystemName[MAX_NAME_LEN] = {0}; char szMountPointName[MAX_STR_LEN] = {0}; char szMountType[MAX_STR_LEN] = {0}; int nTmpMountNum = *pnMountNum; int nMountNum = 0; int nFlag = 0; /* жϹصǷΪĿ¼־λ */ int nIsGetOneInfo = 0; int i; int j = 0; tmpMountInfo = (struct DiskInfo *)malloc(MAX_STR_LEN * sizeof(struct DiskInfo)); if (NULL == tmpMountInfo) { return ERROR; } memset_s(tmpMountInfo, MAX_STR_LEN * sizeof(struct DiskInfo), 0, MAX_STR_LEN * sizeof(struct DiskInfo)); /* /proc/mountsļ */ fpProcMount = openFile(PROC_MOUNTS, "r"); if (NULL == fpProcMount) { free(tmpMountInfo); //lint -save -e438 tmpMountInfo = NULL; return ERROR; //lint -restore } for (i = 0; i < nTmpMountNum; i++) { firstInfo = tmpMountInfo + i; secondInfo = mountInfo + i; (void)strncpy_s(firstInfo->filesystem, 
MAX_STR_LEN, secondInfo->filesystem, strlen(secondInfo->filesystem)); } /* ѭȡÿһ */ while (fgets(szLine, sizeof(szLine), fpProcMount)) { /* ʽȡӦҪ */ if (sscanf_s(szLine, "%s %s %s %*[^\n ]", szFilesystemName, sizeof(szFilesystemName), szMountPointName, sizeof(szMountPointName), szMountType, sizeof(szMountType)) != 3) { continue; } /* ȥ/dev/ͷļϵͳtmpfsȣȥloop豸ȥظص/Ŀ¼ĺ漸ļϵͳ * ȥcdromĹص㣬صĹramfsiso9660 */ if (0 != strncmp(szFilesystemName, "/dev/", 5) || 0 == strncmp(szFilesystemName, "/dev/loop", 9) || (1 == nIsGetOneInfo && 0 == strcmp(szMountPointName, "/")) || 5 == strspn(szMountType , "ramfs") || 7 == strspn(szMountType , "iso9660")) { continue; } /* ص㱻ظ */ for (i = 0; i < nTmpMountNum; i++) { firstInfo = tmpMountInfo + i; /* ѯtmpMountInfoǷڸùصӦϢѾڣtmpMountInfoиùصӦϢ */ if (0 == strcmp(szMountPointName, firstInfo->mountPoint)) { memset_s(firstInfo->filesystem, MAX_STR_LEN, 0, strlen(firstInfo->filesystem)); (void)strncpy_s(firstInfo->filesystem, MAX_STR_LEN, szFilesystemName, strlen(szFilesystemName)); nFlag = 1; break; } } if (0 == nFlag) { firstInfo = tmpMountInfo + nTmpMountNum; /* жͨ˵ϢΪҪӵһ¼¼ӵӦṹУݼۼ */ strncpy_s(firstInfo->filesystem, MAX_STR_LEN, szFilesystemName, strlen(szFilesystemName)); strncpy_s(firstInfo->mountPoint, MAX_STR_LEN, szMountPointName, strlen(szMountPointName)); nTmpMountNum++; } nFlag = 0; nIsGetOneInfo = 1; } /* ļϵͳظ */ for (i = 0; i < nTmpMountNum; i++) { firstInfo = tmpMountInfo + i; for (j = 0; j < nMountNum; j++) { /* ѯmountInfoǷڸļϵͳӦϢѾڣӸ¼ */ if (0 == strcmp(firstInfo->filesystem, (mountInfo + j)->filesystem)) { nFlag = 1; break; } } if (1 == nFlag) { nFlag = 0; continue; } secondInfo = mountInfo + nMountNum; /* mountInfoûиļϵͳӦϢһ¼¼ݼۼ */ strncpy_s(secondInfo->filesystem, MAX_STR_LEN, firstInfo->filesystem, strlen(firstInfo->filesystem)); strncpy_s(secondInfo->mountPoint, MAX_STR_LEN, firstInfo->mountPoint, strlen(firstInfo->mountPoint)); nMountNum++; } *pnMountNum = nMountNum; (void)fclose(fpProcMount); //lint -save -e438 fpProcMount = NULL; free(tmpMountInfo); tmpMountInfo = NULL; return SUCC; //lint -restore } /***************************************************************************** Function : getInfoFromID() Description: 豸豸ţȡӦ豸֣Լ豸С Input : struct DevMajorMinor* devMajorMinor 豸Ƽ豸Žṹ int partNum int devID 豸豸 Output : char* devName ڵ char* devBlockSize ڵռС Return : 0 Other : N/A *****************************************************************************/ int getInfoFromID(struct DevMajorMinor *devMajorMinor, int partNum, int devID, char *devName, char *devBlockSize) { int i; char szDevName[MAX_STR_LEN] = {0}; char szBlockSize[MAX_STR_LEN] = {0}; int nMajor = major(devID); int nMinor = minor(devID); struct DevMajorMinor *tmpDev = NULL; for(i = 0; i < partNum; i++) { tmpDev = devMajorMinor + i; /* 豸Ŷ/proc/partitionsһ˵ͬһ豸豸ݸ */ if (nMajor == tmpDev->devMajor && nMinor == tmpDev->devMinor) { (void)strncpy_s(szDevName, MAX_STR_LEN, tmpDev->devName, strlen(tmpDev->devName)); (void)strncpy_s(szBlockSize, MAX_STR_LEN, tmpDev->devBlockSize, strlen(tmpDev->devBlockSize)); /* devNameΪNULL豸ŶӦ豸ݸ */ if (NULL != devName && 0 != strlen(szDevName)) { (void)strncpy_s(devName, strlen(szDevName)+1, szDevName, strlen(szDevName)); } /* devBlockSizeΪNULL豸ŶӦ豸ʵʿռСݸ */ if (NULL != devBlockSize && 0 != strlen(szBlockSize)) { (void)strncpy_s(devBlockSize, strlen(szBlockSize)+1, szBlockSize, strlen(szBlockSize)); } break; } } return 0; } /***************************************************************************** Function : getDiskInfo() Description: ȡdfжӦϢ洢ṹ Input : struct DevMajorMinor* 
devMajorMinor 豸Ƽ豸Žṹ int partNum Output : struct DiskInfo* diskMap صӦļϵͳϢʹÿռĽṹ int* pnMountNum ڴ洢ش Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getDiskInfo(struct DevMajorMinor *devMajorMinor, int partNum, struct DiskInfo *diskMap, int *pnMountNum) { /* ֮ǰڵúУȻȡswapȽswapֵص */ int nMountedFsNum = *pnMountNum; int nResult; struct statfs statfsInfo; struct stat statBuf; char szFreeSize[MAX_STR_LEN] = {0}; char szFsFreeBlock[MAX_STR_LEN] = {0}; char szFsBlockSize[MAX_STR_LEN] = {0}; char szFsSize[MAX_STR_LEN] = {0}; int i; struct DiskInfo *tmpDisk = NULL; char *substr = NULL; char tmp_mountPoint[MAX_STR_LEN] = {0}; int offset = 0; int len = 0; /* getMountInfoȡصļϵͳϢ(swap֮ۼ) */ nResult = getMountInfo(diskMap, &nMountedFsNum); if (ERROR == nResult) { return ERROR; } /* ѭļϵͳϢ */ for (i = 0; i < nMountedFsNum; i++) { tmpDisk = diskMap + i; /* ͨstatȡļϵͳӦ豸Ϣ浽ṹӦԱ */ nResult = stat(tmpDisk->filesystem, &statBuf); if (0 != nResult || 0 == statBuf.st_rdev) { continue; } /* swapʱͨصȡʹϢ */ if (*pnMountNum <= i) { /* ͨstatfsļϵͳص㣬ȡļϵͳϢ */ if (statfs(tmpDisk->mountPoint, &statfsInfo) != 0) { if (ENOENT != errno) { return ERROR; } /* should retry, such as redhat-6.1 at GUI mode, USB DISK shown as /media/STEC\040DISK, need to replace \040 with ' '*/ substr = strstr(tmpDisk->mountPoint, "\\040"); if (!substr) { return ERROR; } (void)memset_s(tmp_mountPoint, MAX_STR_LEN, 0 ,MAX_STR_LEN); offset = substr - tmpDisk->mountPoint; len = offset + strlen("\\040"); (void)memcpy_s(tmp_mountPoint, MAX_STR_LEN, tmpDisk->mountPoint, offset); (void)memset_s(tmp_mountPoint + offset, MAX_STR_LEN - strlen("\\040"), '\040', 1); (void)memcpy_s(tmp_mountPoint + offset + 1 , MAX_STR_LEN - offset - 1, tmpDisk->mountPoint + len, strlen(tmpDisk->mountPoint) - len); if (statfs(tmp_mountPoint, &statfsInfo) != 0) { return ERROR; } } /* ܿ0mountϢǿգصϢļִָ² */ if (statfsInfo.f_blocks > 0) { /* ÿĴСֵszFsBlockSizeļϵͳֵṹĶӦԱ */ (void)snprintf_s(szFsBlockSize, sizeof(szFsBlockSize), sizeof(szFsBlockSize), "%ld", (long)statfsInfo.f_bsize); /* ʣfreeĿСֵӦı */ (void)snprintf_s(szFsFreeBlock, sizeof(szFsFreeBlock), sizeof(szFsFreeBlock), "%lu", statfsInfo.f_bfree); /* ʹַ˷ʣСм㣬תΪKB */ (void)strMulti(szFsFreeBlock, szFsBlockSize, szFreeSize); (void)unitTransfer(szFreeSize, UNIT_TRANSFER_CYCLE, szFreeSize); } } tmpDisk->st_rdev = statBuf.st_rdev; /* ͨgetInfoFromIDļϵͳ豸ţȡӦʵʿռСϢ */ (void)getInfoFromID(devMajorMinor, partNum, statBuf.st_rdev, NULL, szFsSize); /* ȡļϵͳʵʿռȥļϵͳʣÿռ䣬ļϵͳʹÿռ(Ϊ˼ļϵͳʽռÿռ) */ nResult = strMinus(szFsSize, szFreeSize, tmpDisk->usage); if (ERROR == nResult) { return ERROR; } /* еλת󣬴ṹӦԱ */ nResult = unitTransfer(tmpDisk->usage, UNIT_TRANSFER_CYCLE, tmpDisk->usage); if (ERROR == nResult) { return ERROR; } /* һЩʱַϢȫ */ memset_s(szFsBlockSize, MAX_STR_LEN, 0, sizeof(szFsBlockSize)); memset_s(szFreeSize, MAX_STR_LEN, 0, sizeof(szFreeSize)); memset_s(szFsFreeBlock, MAX_STR_LEN, 0, sizeof(szFsFreeBlock)); memset_s(szFsSize, MAX_STR_LEN, 0, sizeof(szFsSize)); } *pnMountNum = nMountedFsNum; return SUCC; } /***************************************************************************** Function : getDiskNameFromPartName() Description: ѷеĸַֿĸΪ Input : char *pPartName Output : char* pDiskName ڵĴ Return : SUCC = success, ERROR = failure Other : LVMPV ΪʱpPartNamepDiskName *****************************************************************************/ int getDiskNameFromPartName(const char *pPartName, char *pDiskName) { int i = 0; int j = 0; if (NULL == pPartName || 0 == 
strlen(pPartName) || NULL == pDiskName) { return ERROR; } while(*(pPartName + i) != '\0') { if(*(pPartName + i) <= 'z' && *(pPartName + i) >= 'A') { pDiskName[j] = *(pPartName + i); j++; } i++; } if(0 == i) { return ERROR; } else { return SUCC; } } /***************************************************************************** Function : addDiskUsageToDevice() Description: ݷʵ֣Աȴ̣Ϣһ£ۼʹÿռϢӦ Input : struct DeviceInfo* linuxDiskUsage Ƽʹÿռ䡢ܿռĽṹ int diskNum char* partitionsName ڱȶǷһ char* partitionsUsage ʹϢۼӵ̿ռ Output : struct DeviceInfo linuxDiskUsage Return : 0 Other : N/A *****************************************************************************/ int addDiskUsageToDevice(struct DeviceInfo *linuxDiskUsage, int diskNum, const char *partitionsName, char *partitionsUsage) { char szTmpPartition[MAX_NAME_LEN] = {0}; int i; int nPartNameLen = strlen(partitionsName); struct DeviceInfo *tmpUsage = NULL; for (i = 0; i < nPartNameLen; i++) { if (partitionsName[i] >= '0' && partitionsName[i] <= '9') { break; } szTmpPartition[i] = partitionsName[i]; } for (i = 0; i < diskNum; i++) { tmpUsage = linuxDiskUsage + i; /* ͨԱȷ豸(ĩβ)Ƿһһ򽫷ʹϢۼӵʹÿռ */ if (0 == strcmp(szTmpPartition, tmpUsage->phyDevName)) { (void)strAdd(tmpUsage->diskUsage, partitionsUsage, tmpUsage->diskUsage); break; } } return 0; } /***************************************************************************** Function : getDmType() Description: ȡļϵͳ Input : char *szLine dmsetupȡ Output : char *pParentMajor ڵ豸 char *pParentMinor ڵ豸 int *pStripCnt ڵ Return : SUCC = success, ERROR = failure Other : N/A History : 2016.11.11 *****************************************************************************/ int getDmType(int devID, int *pDmType) { char szCmd[64] = {0}; FILE *fpDmCmd = NULL; char szLine[MAX_STR_LEN] = {0}; (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "dmsetup table -j %d -m %d", major(devID), minor(devID)); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { printf("openPipe error \n"); return ERROR; } /* ѭȡִнļָ */ if(NULL == fgets(szLine, sizeof(szLine), fpDmCmd)) { (void)pclose(fpDmCmd); fpDmCmd = NULL; return ERROR; } if (strstr(szLine, "striped") != NULL) { *pDmType = DM_STRIPED; } else if (strstr(szLine, "crypt") != NULL) { *pDmType = DM_CRYPT; } else if (strstr(szLine, "linear") != NULL) { *pDmType = DM_LINEAR; } else if (strstr(szLine, "snapshot") != NULL) { *pDmType = DM_SNAPSHOT; } else if (strstr(szLine, "raid") != NULL) { *pDmType = DM_RAID; } else if (strstr(szLine, "mirror") != NULL) { *pDmType = DM_MIRROR; } else { *pDmType = DM_UNKNOWN; } (void)pclose(fpDmCmd); fpDmCmd = NULL; return SUCC; } /***************************************************************************** Function : getDmStripParentNode() Description: strip£ȡļϵͳĸڵ Input : char *szLine dmsetupȡ Output : char *pParentMajor ڵ豸 char *pParentMinor ڵ豸 int *pStripCnt ڵ Return : SUCC = success, ERROR = failure Other : N/A History : 2016.11.11 *****************************************************************************/ int getDmStripParentNode(int devID, struct DmInfo **pstDmInfo, int *pStripCnt) { char szCmd[64] = {0}; FILE *fpDmCmd = NULL; char szLine[MAX_STR_LEN] = {0}; int i = 0; char *tmp = NULL; struct DmInfo *pDmInfo = NULL; (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "dmsetup table -j %d -m %d", major(devID), minor(devID)); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { return ERROR; } if(NULL == fgets(szLine, sizeof(szLine), fpDmCmd)) { (void)pclose(fpDmCmd); fpDmCmd = NULL; return ERROR; } (void)pclose(fpDmCmd); fpDmCmd = NULL; 
if (sscanf_s(szLine, "%*d %*d %*s %d %*[^\n ]", pStripCnt) != 1) { return ERROR; } if(*pStripCnt <= 0) { return ERROR; } pDmInfo = (struct DmInfo *)malloc((*pStripCnt) * sizeof(struct DmInfo)); if (NULL == pDmInfo) { return ERROR; } (void)memset_s(pDmInfo, (*pStripCnt) * sizeof(struct DmInfo), 0, (*pStripCnt) * sizeof(struct DmInfo)); /* 0 16777216 striped 2 128 202:160 8390656 202:176 8390656ַǰ5ַ */ tmp = szLine; while (i < 5) { tmp = strchr(tmp, ' '); tmp+=1; i++; } /* ʼ202:160 8390656ȡڵ豸 */ i = 0; while (i < *pStripCnt) { if (sscanf_s(tmp, "%d:%d %*[^\n ]", &pDmInfo[i].nParentMajor, &pDmInfo[i].nParentMinor) != 2) { free(pDmInfo); return ERROR; } tmp = strchr(tmp, ' '); tmp+=1; tmp = strchr(tmp, ' '); tmp+=1; i++; } *pstDmInfo = pDmInfo; return SUCC; } /***************************************************************************** Function : getDmLinearParentNodeCount() Description: linear£ȡļϵͳĸڵ Input : struct DevMajorMinor* devMajorMinor /proc/partitions豸豸Žṹ int partNum Ӧϵ int devID ļϵͳ豸 Output : char* parentName ڵ Return : SUCC = success, ERROR = failure Other : N/A History : 2016.11.11, created this function. *****************************************************************************/ int getDmLinearParentNode(int devID, struct DmInfo **pstDmInfo, int *pCnt) { FILE *fpDmCmd = NULL; char szCmd[128] = {0}; char szLine[MAX_STR_LEN] = {0}; int i = 0; int nFlag = SUCC; struct DmInfo *pDmInfo = NULL; (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "dmsetup table -j %d -m %d | wc -l", major(devID), minor(devID)); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { return ERROR; } if (NULL == fgets(szLine, sizeof(szLine), fpDmCmd)) { (void)pclose(fpDmCmd); fpDmCmd = NULL; return ERROR; } (void)pclose(fpDmCmd); if (sscanf_s(szLine, "%d %*[^\n ]", pCnt) != 1) { return ERROR; } pDmInfo = (struct DmInfo *)malloc((*pCnt) * sizeof(struct DmInfo)); if (NULL == pDmInfo) { return ERROR; } (void)memset_s(pDmInfo, (*pCnt) * sizeof(struct DmInfo), 0, (*pCnt) * sizeof(struct DmInfo)); (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "dmsetup table -j %d -m %d", major(devID), minor(devID)); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { free(pDmInfo); pDmInfo = NULL; return ERROR; } while (fgets(szLine, sizeof(szLine), fpDmCmd)) { if (sscanf_s(szLine, "%*d %lld %*s %d:%d %*[^\n ]", &pDmInfo[i].nSectorNum, &pDmInfo[i].nParentMajor, &pDmInfo[i].nParentMinor) != 3) { nFlag = ERROR; free(pDmInfo); pDmInfo = NULL; break; } i++; } (void)pclose(fpDmCmd); fpDmCmd = NULL; *pstDmInfo = pDmInfo; return nFlag; } /***************************************************************************** Function : getDmCryptParentNode() Description: crypt£ȡļϵͳĸڵ Input : char *szLine dmsetupȡ Output : char *pParentMajor ڵ豸 char *pParentMinor ڵ豸 int *pStripCnt ڵ Return : SUCC = success, ERROR = failure Other : N/A History : 2016.11.11 *****************************************************************************/ int getDmCryptParentNode(int devID, struct DmInfo **pstDmInfo, int *pCnt) { char szCmd[64] = {0}; FILE *fpDmCmd = NULL; char szLine[MAX_STR_LEN] = {0}; int i = 0; struct DmInfo *pDmInfo = NULL; (void)snprintf_s(szCmd, sizeof(szCmd), sizeof(szCmd), "dmsetup table -j %d -m %d", major(devID), minor(devID)); fpDmCmd = openPipe(szCmd, "r"); if (NULL == fpDmCmd) { return ERROR; } pDmInfo = (struct DmInfo *)malloc(sizeof(struct DmInfo)); if (NULL == pDmInfo) { (void)pclose(fpDmCmd); fpDmCmd = NULL; return ERROR; } (void)memset_s(pDmInfo, sizeof(struct DmInfo), 0, sizeof(struct DmInfo)); while (fgets(szLine, 
sizeof(szLine), fpDmCmd)) { if(i > 0 || sscanf_s(szLine, "%*d %*d %*s %*s %*s %*d %d:%d %*[^\n ]", &pDmInfo[i].nParentMajor, &pDmInfo[i].nParentMinor) != 2) { free(pDmInfo); pDmInfo = NULL; (void)pclose(fpDmCmd); fpDmCmd = NULL; return ERROR; } i++; } (void)pclose(fpDmCmd); fpDmCmd = NULL; /* ܸʽֻ1ڵ */ *pCnt = 1; *pstDmInfo = pDmInfo; return SUCC; } /***************************************************************************** Function : getDmParentNode() Description: ļϵͳ豸ţȡļϵͳĸڵ Input : struct DevMajorMinor* devMajorMinor /proc/partitions豸豸Žṹ int partNum Ӧϵ int devID ļϵͳ豸 Output : char* parentName ڵ Return : SUCC = success, ERROR = failure Other : N/A History : 2011.09.16, created this function. *****************************************************************************/ int getDmParentNode(struct DeviceInfo *linuxDiskUsage, struct DevMajorMinor *devMajorMinor, struct NumberCount numCnt, int devID, long long usage) { char szRealName[MAX_NAME_LEN] = {0}; int nParentDevID = 0; int nFlag = SUCC; int nParentCnt = 0; struct DmInfo *pstDmInfo = NULL; int i = 0; char szUsage[64] = {0}; char szDiskName[MAX_NAME_LEN] = {0}; int nDmType = 0; long long lsubusage = 0L; long long lpvSize = 0L; if(SUCC != getDmType(devID, &nDmType)) { return ERROR; } switch(nDmType) { case DM_LINEAR: { if(SUCC != getDmLinearParentNode(devID, &pstDmInfo, &nParentCnt)) { return ERROR; } break; } case DM_STRIPED: { if(SUCC != getDmStripParentNode(devID, &pstDmInfo, &nParentCnt)) { return ERROR; } break; } case DM_CRYPT: { if(SUCC != getDmCryptParentNode(devID, &pstDmInfo, &nParentCnt)) { return ERROR; } break; } case DM_RAID: case DM_SNAPSHOT: case DM_MIRROR: default: { /* ʱسɹ */ return SUCC; } } for (i = 0; i < nParentCnt && usage > 0; i++) { /* nParentCnt=1黯ͬ */ if (DM_CRYPT == nDmType || DM_STRIPED == nDmType) { lsubusage = usage / nParentCnt; } else { lpvSize = pstDmInfo[i].nSectorNum * SECTOR_SIZE / MEGATOBYTE; if (usage >= lpvSize) { lsubusage = lpvSize; usage -= lpvSize; } else { lsubusage = usage; usage = 0; } } nParentDevID = makedev(pstDmInfo[i].nParentMajor, pstDmInfo[i].nParentMinor); if (g_deviceMapperNum == pstDmInfo[i].nParentMajor) { if (SUCC != getDmParentNode(linuxDiskUsage, devMajorMinor, numCnt, nParentDevID, lsubusage)) { nFlag = ERROR; break; } } else { (void)memset_s(szRealName, MAX_NAME_LEN, 0, MAX_NAME_LEN); (void)memset_s(szDiskName, MAX_NAME_LEN, 0, MAX_NAME_LEN); (void)getInfoFromID(devMajorMinor, numCnt.partNum, nParentDevID, szRealName, NULL); if(ERROR == getDiskNameFromPartName(szRealName, szDiskName)) { nFlag = ERROR; break; } if (0 != strlen(szRealName)) { /* ͨaddDiskUsageToDeviceķʹÿռۼӵӦĴ */ (void)snprintf_s(szUsage, sizeof(szUsage), sizeof(szUsage), "%lld", lsubusage); (void)addDiskUsageToDevice(linuxDiskUsage, numCnt.diskNum, szRealName, szUsage); (void)memset_s(szRealName, MAX_NAME_LEN, 0, sizeof(szRealName)); } } } free(pstDmInfo); return nFlag; } /***************************************************************************** Function : getAllDeviceUsage() Description: еķϢۼӵӦ̵ʹÿռ Input : struct DevMajorMinor* devMajorMinor 豸豸Žṹ struct DeviceInfo* linuxDiskUsage Ƽʹÿռ䡢ܿռĽṹ struct DiskInfo diskMap صӦļϵͳϢʹÿռĽṹ struct NumberCount numberCount ԼصӦļϵͳϢĽṹ Output : struct DeviceInfo* linuxDiskUsage ̵ʹÿռȡ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getAllDeviceUsage(struct DevMajorMinor *devMajorMinor, struct DeviceInfo *linuxDiskUsage, struct DiskInfo *diskMap, struct NumberCount numberCount) { char 
szParentName[MAX_NAME_LEN] = {0}; int nResult = 0; int i; struct DiskInfo *tmpDisk = NULL; /* drbd豸Ӧĸ豸豸 */ struct stat statBuf; long long usage = 0L; for (i = 0; i < numberCount.mountNum; i++) { tmpDisk = diskMap + i; usage = atoll(tmpDisk->usage); /* mount¼ļϵͳdrbd豸ʱҪҵӦĸ豸 */ if (NULL != strstr(tmpDisk->filesystem, "drbd")) { (void)getDrbdParent(tmpDisk->filesystem, tmpDisk->filesystem); /* ͨstatȡdrbd豸Ӧĸ豸豸Ϣ浽ṹӦԱ */ nResult = stat(tmpDisk->filesystem, &statBuf); if (0 != nResult || 0 == statBuf.st_rdev) { continue; } /* mount¼е豸 */ tmpDisk->st_rdev = statBuf.st_rdev; } if (g_deviceMapperNum == major(tmpDisk->st_rdev)) { /* ͨdmsetupȡ߼Ӧ豸 */ nResult = getDmParentNode(linuxDiskUsage, devMajorMinor, numberCount, tmpDisk->st_rdev, usage); if (ERROR == nResult) { return ERROR; } } else { (void)getInfoFromID(devMajorMinor, numberCount.partNum, tmpDisk->st_rdev, szParentName, NULL); if (0 != strlen(szParentName)) { /* ͨaddDiskUsageToDeviceķʹÿռۼӵӦĴ */ (void)addDiskUsageToDevice(linuxDiskUsage, numberCount.diskNum, szParentName, tmpDisk->usage); (void)memset_s(szParentName, MAX_NAME_LEN, 0, sizeof(szParentName)); } } } return SUCC; } /***************************************************************************** Function : freeSpace() Description: ͷṹ뵽Ŀռ Input : N/A Output : struct DevMajorMinor* devMajorMinor 豸豸Žṹ struct DeviceInfo* linuxDiskUsage Ƽʹÿռ䡢ܿռĽṹ struct DiskInfo* diskMap صӦļϵͳϢʹÿռĽṹ Return : void Other : N/A *****************************************************************************/ void freeSpace(struct DevMajorMinor *devMajorMinor, struct DeviceInfo *linuxDiskUsage, struct DiskInfo *diskMap) { //lint -save -e438 if (devMajorMinor != NULL) { free(devMajorMinor); devMajorMinor = NULL; } if (linuxDiskUsage != NULL) { free(linuxDiskUsage); linuxDiskUsage = NULL; } if (diskMap != NULL) { free(diskMap); diskMap = NULL; } //lint -restore } /***************************************************************************** Function : getDiskUsage() Description: ô Input : struct xs_handle* handle handle of xenstore Output : char* pszDiskUsage 洢ʵַ Return : SUCC = success, ERROR = failure Other : N/A *****************************************************************************/ int getDiskUsage(struct xs_handle *handle, char pszDiskUsage[][MAX_DISKUSAGE_LEN], int *row_num) { /* 洢ܴ̿ռϢַ(λΪMB) */ char szTotalSize[MAX_DISKUSAGE_LEN] = {0}; /* 洢ʹÿռϢַ(λΪMB) */ char szTotalUsage[MAX_DISKUSAGE_LEN] = {0}; /* 洢ÿϢӵַ */ char szUsageString[MAX_ROWS][MAX_DISKUSAGE_LEN] = {0}; struct DevMajorMinor *devMajorMinor; struct DeviceInfo *linuxDiskUsage; struct DiskInfo *diskMap = NULL; struct NumberCount numberCount = {0}; int nSwapNum; int nResult = 0; int i; if (!pszDiskUsage || !row_num) return ERROR; *row_num = 0; devMajorMinor = (struct DevMajorMinor *)malloc(MAX_DISKUSAGE_LEN * sizeof(struct DevMajorMinor)); if (NULL == devMajorMinor) { return ERROR; } memset_s(devMajorMinor, MAX_DISKUSAGE_LEN * sizeof(struct DevMajorMinor), 0, MAX_DISKUSAGE_LEN * sizeof(struct DevMajorMinor)); linuxDiskUsage = (struct DeviceInfo *)malloc(MAX_DISK_NUMBER * sizeof(struct DeviceInfo)); if (NULL == linuxDiskUsage) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } memset_s(linuxDiskUsage, MAX_DISK_NUMBER * sizeof(struct DeviceInfo), 0, MAX_DISK_NUMBER * sizeof(struct DeviceInfo)); diskMap = (struct DiskInfo *)malloc(MAX_DISKUSAGE_LEN * sizeof(struct DiskInfo)); if (NULL == diskMap) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } memset_s(diskMap, MAX_DISKUSAGE_LEN * sizeof(struct DiskInfo), 0, 
MAX_DISKUSAGE_LEN * sizeof(struct DiskInfo)); /* Ӧdevice-mapper͵豸Ϊ0getDeviceMapperNumberȡ豸 */ if (0 == g_deviceMapperNum) { nResult = getDeviceMapperNumber(); if (ERROR == nResult) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } } /* * getPartitionsInfoȡ̣ԼӦĴܿռСKB豸Ϣ * ȡӦ豸ռСfdiskӦϢ */ nResult = getPartitionsInfo(handle, devMajorMinor, &numberCount.partNum, linuxDiskUsage, &numberCount.diskNum); if (ERROR == nResult || 0 == numberCount.partNum || 0 == numberCount.diskNum) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } /* getSwapInfoȡǰswapϢ */ nResult = getSwapInfo(diskMap, &numberCount.mountNum); if (ERROR == nResult) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } nSwapNum = numberCount.mountNum; /* getDiskInfoȡǰصļϵͳص㣬豸ԼʹÿռϢ */ nResult = getDiskInfo(devMajorMinor, numberCount.partNum, diskMap, &numberCount.mountNum); if (ERROR == nResult || nSwapNum >= numberCount.mountNum) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } /* getAllDeviceUsageͨȡĹصļϵͳʹϢҶӦĸ豸Ӧ̵ʹϢ */ nResult = getAllDeviceUsage(devMajorMinor, linuxDiskUsage, diskMap, numberCount); if (ERROR == nResult) { freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return ERROR; } /* ѭȡṹݵÿԱ */ for (i = 0; i < numberCount.diskNum; i++) { /* ȡϢתΪַҼܿռС */ (void)strAdd(linuxDiskUsage[i].deviceTotalSpace, szTotalSize, szTotalSize); (void)strAdd(linuxDiskUsage[i].diskUsage, szTotalUsage, szTotalUsage); //ֻƴ61Ϣ(1+60) if(i <= MAX_DISKUSAGE_STRING_NUM) { *row_num = i / MAX_DISKUSAGE_NUM_PER_KEY; (void)snprintf_s(szUsageString[*row_num] + strlen(szUsageString[*row_num]), (MAX_DISKUSAGE_LEN - strlen(szUsageString[*row_num])), (MAX_DISKUSAGE_LEN - strlen(szUsageString[*row_num])), "%s:%s:%s;", linuxDiskUsage[i].phyDevName, linuxDiskUsage[i].deviceTotalSpace, linuxDiskUsage[i].diskUsage); } } /* ܵĴ̿ռʹÿռԼյÿ̵ʹʶӦֵַ */ for (i = 0; i <= *row_num; i++) { if (0 == i) (void)snprintf_s(pszDiskUsage[i], MAX_DISKUSAGE_LEN, MAX_DISKUSAGE_LEN, "0:%s:%s;%s", szTotalSize, szTotalUsage, szUsageString[i]); else (void)snprintf_s(pszDiskUsage[i], MAX_DISKUSAGE_LEN, MAX_DISKUSAGE_LEN, "%s", szUsageString[i]); } freeSpace(devMajorMinor, linuxDiskUsage, diskMap); return SUCC; } /***************************************************************************** Function : FilesystemUsage() Description: get Filesystemname list and its usage Input : struct xs_handle* handle handle of xenstore Output : int Return : SUCC = success, ERROR = failure Other : N/A Remark : 2013-9-4 create this function for suse11sp1 Linux OS *****************************************************************************/ int FilesystemUsage(struct xs_handle *handle) { FILE *file; char buf[1056] = {0}; char filename[1024] = {0}; char path[32] = {0}; char value[MAX_FILENAMES_XENSTORLEN+1] = {0}; char numbuf[32] = {0}; int FileNameArrLen; int size,used; int num; int exceedflag; int i; /*ͻṩshellʽshellȡļϵͳƣܴСôС*/ file = openPipe("df -lmP | grep -v Filesystem | grep -v Used | grep -v tmpfs | grep -v shm","r"); if(NULL == file) { DEBUG_LOG("Failed to exec df -lmP shell command."); (void)write_to_xenstore(handle, FILE_DATA_PATH, "error"); return ERROR; } (void)memset_s(FilenameArr,MAX_FILENAMES_SIZE,0,MAX_FILENAMES_SIZE); while(NULL != fgets(buf,sizeof(buf),file)) { (void)sscanf_s(buf,"%s %d %d",filename,sizeof(filename),&size,&used); (void)snprintf_s(FilenameArr+strlen(FilenameArr), sizeof(FilenameArr) - strlen(FilenameArr), sizeof(FilenameArr) - strlen(FilenameArr), "%s:%d:%d;",filename,size,used); } (void)pclose(file); 
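    /*
     * FilenameArr now holds one "filesystem:size:used;" record per line of
     * df -lmP output, with sizes in MB, for example (illustrative values
     * only):
     *
     *     /dev/xvda1:10240:2048;/dev/xvda2:51200:10240;
     *
     * The code below splits this string into MAX_FILENAMES_XENSTORLEN-byte
     * chunks and writes each chunk to its own xenstore key.
     */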
FileNameArrLen = strlen(FilenameArr); num = FileNameArrLen / MAX_FILENAMES_XENSTORLEN; exceedflag = FileNameArrLen % MAX_FILENAMES_XENSTORLEN; (void)snprintf_s(value, MAX_FILENAMES_XENSTORLEN, MAX_FILENAMES_XENSTORLEN, "%s", FilenameArr); if(xb_write_first_flag == 0) { (void)write_to_xenstore(handle, FILE_DATA_PATH, value); } else { (void)write_weak_to_xenstore(handle, FILE_DATA_PATH, value); } /*ٽֵnum+1;:33/32=1;1ֽڣҪд*/ if(exceedflag) { num += 1; } /*xenstoreֵܳfilesystem_extra%dֵд*/ for(i=1; i #include #include #include #include /* linux */ #include #include #include #include /* socket */ #include #include /* time */ #include #include #include #include #include #include #include "xenstore_common.h" #include #include #include #define IP_DOWN "disconnected" #define ERR_STR "error" #define VIF_DATA_PATH "control/uvp/vif" #define VIFEXTRA_DATA_PATH "control/uvp/vif_extra1" #define IPV6_VIF_DATA_PATH "control/uvp/netinfo" #define VIF_DROP_PATH "control/uvp/networkloss" #define CPU_DATA_PATH "control/uvp/cpu" #define CPU_TIME_PATH "control/uvp/cpuusage" #define ERROR -1 #define UNKNOWN -2 #define SUCC 0 #endif #ifdef __cplusplus #if __cplusplus } #endif #endif UVP-Tools-2.2.0.316/uvp-monitor/include/public_common.h000066400000000000000000000060531314037446600225300ustar00rootroot00000000000000/* * public_common.h * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifdef __cplusplus #if __cplusplus extern "C" { #endif #endif #ifndef _PUBLIC_COMMON_H #define _PUBLIC_COMMON_H #include extern void sys_log(const char* process, int Level, const char *func, int line, const char *format, ...); #define DEBUG_LOG(fmt, args...) sys_log("uvp-monitor", LOG_DEBUG, __func__, __LINE__, fmt, ##args) #define INFO_LOG(fmt, args...) sys_log("uvp-monitor", LOG_INFO, __func__, __LINE__, fmt, ##args) #define ERR_LOG(fmt, args...) 
sys_log("uvp-monitor", LOG_ERR, __func__, __LINE__, fmt, ##args) #define RELEASE_BOND "control/uvp/release_bond" #define REBOND_SRIOV "control/uvp/rebond_sriov" #define HEALTH_CHECK_PATH "control/uvp/upgrade/inspection" #define HEALTH_CHECK_RESULT_PATH "control/uvp/upgrade/inspect-result" int cpuworkctlmon(struct xs_handle *handle); int memoryworkctlmon(struct xs_handle *handle); int hostnameworkctlmon( struct xs_handle *handle ); int diskworkctlmon(struct xs_handle *handle); void networkctlmon(void *handle); void NetinfoNetworkctlmon(void *handle); int netbond(); int releasenetbond(void *handle); int rebondnet(void *handle); void InitBond(); void start_service(void); int SetCpuHotplugFeature(void *phandle); void cpuhotplug_regwatch(void *phandle); int DoCpuHotplug(void *phandle); void doUpgrade(void *handle, char *path); void doMountISOUpgrade(void *handle); char *trim(char *str); int exe_command(char *path); void write_tools_result(void *handle); int do_healthcheck(void * phandle); int eject_command(); int CheckDiskspace(); int do_command(char *path); int is_suse(); #endif #ifdef __cplusplus #if __cplusplus } #endif #endif UVP-Tools-2.2.0.316/uvp-monitor/include/qlist.h000066400000000000000000000370161314037446600210410ustar00rootroot00000000000000 #ifndef _QLIST_H #define _QLIST_H #define QLIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define QLIST_HEAD_INITIALIZER(head) \ { NULL } #define QLIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } #define QLIST_INIT(head) do { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ (listelm)->field.le_next = (elm); \ (elm)->field.le_prev = &(listelm)->field.le_next; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.le_next = (head)->lh_first) != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ (head)->lh_first = (elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) #define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ (elm)->field.le_prev = &(head)->lh_first; \ (elm)->field.le_next = (head)->lh_first; \ smp_wmb(); /* fill elm before linking it */ \ if ((head)->lh_first != NULL) { \ (head)->lh_first->field.le_prev = &(elm)->field.le_next; \ } \ (head)->lh_first = (elm); \ smp_wmb(); \ } while (/* CONSTCOND*/0) #define QLIST_REMOVE(elm, field) do { \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define QLIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ (var) = ((var)->field.le_next)) #define QLIST_FOREACH_SAFE(var, head, field, next_var) \ for ((var) = ((head)->lh_first); \ (var) && ((next_var) = ((var)->field.le_next), 1); \ (var) = (next_var)) /* * List access methods. 
*/ #define QLIST_EMPTY(head) ((head)->lh_first == NULL) #define QLIST_FIRST(head) ((head)->lh_first) #define QLIST_NEXT(elm, field) ((elm)->field.le_next) /* * Singly-linked List definitions. */ #define QSLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define QSLIST_HEAD_INITIALIZER(head) \ { NULL } #define QSLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. */ #define QSLIST_INIT(head) do { \ (head)->slh_first = NULL; \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \ (elm)->field.sle_next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = (elm); \ } while (/*CONSTCOND*/0) #define QSLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ (head)->slh_first = (elm); \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE_HEAD(head, field) do { \ (head)->slh_first = (head)->slh_first->field.sle_next; \ } while (/*CONSTCOND*/0) #define QSLIST_REMOVE_AFTER(slistelm, field) do { \ (slistelm)->field.sle_next = \ QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \ } while (/*CONSTCOND*/0) #define QSLIST_FOREACH(var, head, field) \ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) #define QSLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = QSLIST_FIRST((head)); \ (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \ (var) = (tvar)) /* * Singly-linked List access methods. */ #define QSLIST_EMPTY(head) ((head)->slh_first == NULL) #define QSLIST_FIRST(head) ((head)->slh_first) #define QSLIST_NEXT(elm, field) ((elm)->field.sle_next) /* * Simple queue definitions. */ #define QSIMPLEQ_HEAD(name, type) \ struct name { \ struct type *sqh_first; /* first element */ \ struct type **sqh_last; /* addr of last next element */ \ } #define QSIMPLEQ_HEAD_INITIALIZER(head) \ { NULL, &(head).sqh_first } #define QSIMPLEQ_ENTRY(type) \ struct { \ struct type *sqe_next; /* next element */ \ } /* * Simple queue functions. 
*/ #define QSIMPLEQ_INIT(head) do { \ (head)->sqh_first = NULL; \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (head)->sqh_first = (elm); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ *(head)->sqh_last = (elm); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (listelm)->field.sqe_next = (elm); \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ QSIMPLEQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->sqh_first; \ while (curelm->field.sqe_next != (elm)) \ curelm = curelm->field.sqe_next; \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \ (var) = ((var)->field.sqe_next)) #define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \ for ((var) = ((head)->sqh_first); \ (var) && ((next = ((var)->field.sqe_next)), 1); \ (var) = (next)) #define QSIMPLEQ_CONCAT(head1, head2) do { \ if (!QSIMPLEQ_EMPTY((head2))) { \ *(head1)->sqh_last = (head2)->sqh_first; \ (head1)->sqh_last = (head2)->sqh_last; \ QSIMPLEQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) #define QSIMPLEQ_LAST(head, type, field) \ (QSIMPLEQ_EMPTY((head)) ? \ NULL : \ ((struct type *)(void *) \ ((char *)((head)->sqh_last) - offsetof(struct type, field)))) /* * Simple queue access methods. */ #define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define QSIMPLEQ_FIRST(head) ((head)->sqh_first) #define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) /* * Tail queue definitions. */ #define Q_TAILQ_HEAD(name, type, qual) \ struct name { \ qual type *tqh_first; /* first element */ \ qual type *qual *tqh_last; /* addr of last next element */ \ } #define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,) #define QTAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define Q_TAILQ_ENTRY(type, qual) \ struct { \ qual type *tqe_next; /* next element */ \ qual type *qual *tqe_prev; /* address of previous next element */\ } #define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,) /* * Tail queue functions. 
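 *
 * Minimal QTAILQ usage sketch (illustrative only; "struct job" and the
 * "entries" field are hypothetical names, not part of this header), showing
 * tail insertion and removal-safe iteration:
 *
 *     struct job { int id; QTAILQ_ENTRY(job) entries; };
 *     QTAILQ_HEAD(job_head, job) jobs = QTAILQ_HEAD_INITIALIZER(jobs);
 *     struct job j1 = { 1 }, j2 = { 2 }, *j, *tmp;
 *
 *     QTAILQ_INSERT_TAIL(&jobs, &j1, entries);
 *     QTAILQ_INSERT_TAIL(&jobs, &j2, entries);
 *     QTAILQ_FOREACH_SAFE(j, &jobs, entries, tmp)
 *         QTAILQ_REMOVE(&jobs, j, entries);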
*/ #define QTAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ (head)->tqh_last = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ (head)->tqh_first->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (head)->tqh_first = (elm); \ (elm)->field.tqe_prev = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ (elm)->field.tqe_next->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (listelm)->field.tqe_next = (elm); \ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ (elm)->field.tqe_next = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define QTAILQ_REMOVE(head, elm, field) do { \ if (((elm)->field.tqe_next) != NULL) \ (elm)->field.tqe_next->field.tqe_prev = \ (elm)->field.tqe_prev; \ else \ (head)->tqh_last = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define QTAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ (var) = ((var)->field.tqe_next)) #define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \ for ((var) = ((head)->tqh_first); \ (var) && ((next_var) = ((var)->field.tqe_next), 1); \ (var) = (next_var)) #define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ (var); \ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) /* * Tail queue access methods. */ #define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define QTAILQ_FIRST(head) ((head)->tqh_first) #define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define QTAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define QTAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #endif UVP-Tools-2.2.0.316/uvp-monitor/include/uvpmon.h000066400000000000000000000035431314037446600212270ustar00rootroot00000000000000/* * uvp-monitor header file * * Copyright 2016, Huawei Tech. Co., Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _UVPMON_H #define _UVPMON_H int uvpPopen(const char* pszCmd, char* pszBuffer, int size); void freePath(char *path1, char *path2, char *path3); int CheckArg(char* chCmdStr, char** pchCmdType, char** pchFileName, char** pchPara); int getFluxinfoLine(char *ifname, char *pline); int getVifData(char *pline, int nNumber, char *out); void uvp_unregwatch(void *phandle); long getfreedisk(char *path); int is_suse(); #endif UVP-Tools-2.2.0.316/uvp-monitor/include/xenstore_common.h000066400000000000000000000075451314037446600231300ustar00rootroot00000000000000/* * xenstore head file * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifdef __cplusplus #if __cplusplus extern "C" { #endif #endif #ifndef _XENSTORE_COMMOM_H #define _XENSTORE_COMMOM_H /* ANSC C */ #include #include #include #include #include #include #include #include "xs.h" #define XEN_SUCC 0 #define XEN_FAIL -1 #define SERVICE_FLAG_WATCH_PATH "control/uvp/monitor-service-flag" #define UVP_VM_STATE_PATH "control/uvp/vm_state" #define FEATURE_FLAG_WATCH_PATH "control/uvp/feature_flag" #define UVP_PATH "control/uvp/performance" /* xenstoreעpv-driver汾ļֵ */ #define PVDRIVER_VERSION "control/uvp/pvdriver-version" /* ѯ汾ϢȲϢļֵ */ //#define PVDRIVER_STATIC_INFO_PATH "control/uvp/static-info-flag" /* define couhotplug watch path */ #define CPU_HOTPLUG_FEATURE "control/uvp/cpuhotplug_feature" #define CPU_HOTPLUG_SIGNAL "control/uvp/cpu_hotplug" #define CPU_HOTPLUG_STATE "control/uvp/cpuhotplug_flag" #define CPU_HOTPLUG_ONLINE "control/uvp/enable_cpuonline" /* üֵpv driver汾֧pvscsi*/ #define SCSI_FEATURE_PATH "control/uvp/vscsi_feature" #define GUSET_OS_FEATURE "control/uvp/guest_feature" /*չϢ·λ*/ #define EXINFO_FLAG_PATH "control/uvp/exinfo_flag" /*չϢϱļֵ*/ #define DISABLE_EXINFO_PATH "control/uvp/disable_exinfo" #define EXINFO_FLAG_DEFAULT 0 //ĬϣֻչϢ #define EXINFO_FLAG_CPU_USAGE (1<<0) //cpuʱϢ #define EXINFO_FLAG_FILESYSTEM (1<<1) //ļϵͳϢ #define EXINFO_FLAG_GATEWAY (1<<2) //Ϣ #define EXINFO_FLAG_NET_LOSS (1<<3) //Ϣ #define DISABLE_EXINFO 1 #define ENABLE_EXINFO 0 long g_exinfo_flag_value; //չϢȫֱ־λ long g_disable_exinfo_value; // for netinfo global value long g_netinfo_value; // for upgrade pv restart value long g_monitor_restart_value; char *read_from_xenstore (void *handle, char *path); void write_to_xenstore (void *handle, char *path, char *buf); void write_weak_to_xenstore (void *handle, char *path, char *buf); char **readWatch(void *handle); bool regwatch(void *handle, const char *path, const char *token); void *openxenstore(void); void closexenstore(void *handle); int watch_listen(int xsfd); int condition(void); int getxsfileno(void *handle); //monitorһxenstoreдչϢıʾ int xb_write_first_flag; #endif #ifdef __cplusplus #if __cplusplus } #endif #endif UVP-Tools-2.2.0.316/uvp-monitor/include/xs.h000066400000000000000000000142061314037446600203330ustar00rootroot00000000000000/* Xen Store Daemon providing simple tree-like database. Copyright (C) 2005 Rusty Russell IBM Corporation This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _XS_H #define _XS_H #include #define XBT_NULL 0 struct xs_handle; typedef uint32_t xs_transaction_t; /* IMPORTANT: For details on xenstore protocol limits, see * docs/misc/xenstore.txt in the Xen public source repository, and use the * XENSTORE_*_MAX limit macros defined in xen/io/xs_wire.h. */ /* On failure, these routines set errno. */ /* Connect to the xs daemon. * Returns a handle or NULL. 
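 *
 * A minimal client sketch using the routines declared in this header
 * (illustrative only; the path read here is just an example):
 *
 *     struct xs_handle *h = xs_domain_open();
 *     unsigned int len;
 *     char *val;
 *
 *     if (h != NULL) {
 *         val = xs_read(h, XBT_NULL, "control/uvp/vm_state", &len);
 *         if (val != NULL) {
 *             ... use the nul-terminated value, then free(val) ...
 *         }
 *         xs_daemon_close(h);
 *     }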
*/ struct xs_handle *xs_daemon_open(void); struct xs_handle *xs_domain_open(void); /* Connect to the xs daemon (readonly for non-root clients). * Returns a handle or NULL. */ struct xs_handle *xs_daemon_open_readonly(void); /* Close the connection to the xs daemon. */ void xs_daemon_close(struct xs_handle *); /* Get contents of a directory. * Returns a malloced array: call free() on it after use. * Num indicates size. */ char **xs_directory(struct xs_handle *h, xs_transaction_t t, const char *path, unsigned int *num); /* Get the value of a single file, nul terminated. * Returns a malloced value: call free() on it after use. * len indicates length in bytes, not including terminator. */ void *xs_read(struct xs_handle *h, xs_transaction_t t, const char *path, unsigned int *len); /* Write the value of a single file. * Returns false on failure. */ bool xs_write(struct xs_handle *h, xs_transaction_t t, const char *path, const void *data, unsigned int len); /* Write the value of a single file. * Returns false on failure. */ bool xs_weak_write(struct xs_handle *h, xs_transaction_t t, const char *path, const void *data, unsigned int len); /* Create a new directory. * Returns false on failure, or success if it already exists. */ bool xs_mkdir(struct xs_handle *h, xs_transaction_t t, const char *path); /* Destroy a file or directory (and children). * Returns false on failure, or if it doesn't exist. */ bool xs_rm(struct xs_handle *h, xs_transaction_t t, const char *path); /* Get permissions of node (first element is owner, first perms is "other"). * Returns malloced array, or NULL: call free() after use. */ struct xs_permissions *xs_get_permissions(struct xs_handle *h, xs_transaction_t t, const char *path, unsigned int *num); /* Set permissions of node (must be owner). * Returns false on failure. */ bool xs_set_permissions(struct xs_handle *h, xs_transaction_t t, const char *path, struct xs_permissions *perms, unsigned int num_perms); /* Watch a node for changes (poll on fd to detect, or call read_watch()). * When the node (or any child) changes, fd will become readable. * Token is returned when watch is read, to allow matching. * Returns false on failure. */ bool xs_watch(struct xs_handle *h, const char *path, const char *token); /* Return the FD to poll on to see if a watch has fired. */ int xs_fileno(struct xs_handle *h); /* Find out what node change was on (will block if nothing pending). * Returns array containing the path and token. Use XS_WATCH_* to access these * elements. Call free() after use. */ char **xs_read_watch(struct xs_handle *h, unsigned int *num); /* Remove a watch on a node: implicitly acks any outstanding watch. * Returns false on failure (no watch on that node). */ bool xs_unwatch(struct xs_handle *h, const char *path, const char *token); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. * Returns NULL on failure. */ xs_transaction_t xs_transaction_start(struct xs_handle *h); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. * Returns false on failure: if errno == EAGAIN, you have to restart * transaction. */ bool xs_transaction_end(struct xs_handle *h, xs_transaction_t t, bool abort); /* Introduce a new domain. * This tells the store daemon about a shared memory page, event channel and * store path associated with a domain: the domain uses these to communicate. 
*/ bool xs_introduce_domain(struct xs_handle *h, unsigned int domid, unsigned long mfn, unsigned int eventchn); /* Set the target of a domain * This tells the store daemon that a domain is targetting another one, so * it should let it tinker with it. */ bool xs_set_target(struct xs_handle *h, unsigned int domid, unsigned int target); /* Resume a domain. * Clear the shutdown flag for this domain in the store. */ bool xs_resume_domain(struct xs_handle *h, unsigned int domid); /* Release a domain. * Tells the store domain to release the memory page to the domain. */ bool xs_release_domain(struct xs_handle *h, unsigned int domid); /* Query the home path of a domain. Call free() after use. */ char *xs_get_domain_path(struct xs_handle *h, unsigned int domid); /* Return whether the domain specified has been introduced to xenstored. */ bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid); /* Only useful for DEBUG versions */ char *xs_debug_command(struct xs_handle *h, const char *cmd, void *data, unsigned int len); int xs_suspend_evtchn_port(int domid); #endif /* _XS_H */ /* * Local variables: * c-file-style: "linux" * indent-tabs-mode: t * c-indent-level: 8 * c-basic-offset: 8 * tab-width: 8 * End: */ UVP-Tools-2.2.0.316/uvp-monitor/include/xs_lib.h000066400000000000000000000055131314037446600211620ustar00rootroot00000000000000/* Common routines between Xen store user library and daemon. Copyright (C) 2005 Rusty Russell IBM Corporation This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _XS_LIB_H #define _XS_LIB_H #include #include #include #include #include #include // without this file, pclint will report errors and warnings on line 64 /* Bitmask of permissions. */ enum xs_perm_type { XS_PERM_NONE = 0, XS_PERM_READ = 1, XS_PERM_WRITE = 2, /* Internal use. */ XS_PERM_ENOENT_OK = 4, XS_PERM_OWNER = 8, }; struct xs_permissions { unsigned int id; enum xs_perm_type perms; }; /* Each 10 bits takes ~ 3 digits, plus one, plus one for nul terminator. */ #define MAX_STRLEN(x) ((sizeof(x) * CHAR_BIT + CHAR_BIT-1) / 10 * 3 + 2) /* Path for various daemon things: env vars can override. */ const char *xs_daemon_rootdir(void); const char *xs_daemon_rundir(void); const char *xs_daemon_socket(void); const char *xs_daemon_socket_ro(void); const char *xs_domain_dev(void); const char *xs_daemon_tdb(void); /* Simple write function: loops for you. */ bool xs_write_all(int fd, const void *data, unsigned int len); /* Convert strings to permissions. False if a problem. */ bool xs_strings_to_perms(struct xs_permissions *perms, unsigned int num, const char *strings); /* Convert permissions to a string (up to len MAX_STRLEN(unsigned int)+1). */ bool xs_perm_to_string(const struct xs_permissions *perm, char *buffer, size_t buf_len); /* Given a string and a length, count how many strings (nul terms). 
*/ unsigned int xs_count_strings(const char *strings, unsigned int len); /* Sanitising (quoting) possibly-binary strings. */ struct expanding_buffer { char *buf; int avail; }; /* Ensure that given expanding buffer has at least min_avail characters. */ char *expanding_buffer_ensure(struct expanding_buffer *, int min_avail); /* sanitise_value() may return NULL if malloc fails. */ char *sanitise_value(struct expanding_buffer *, const char *val, unsigned len); /* *out_len_r on entry is ignored; out must be at least strlen(in)+1 bytes. */ void unsanitise_value(char *out, unsigned *out_len_r, const char *in); #endif /* _XS_LIB_H */ UVP-Tools-2.2.0.316/uvp-monitor/include/xs_wire.h000066400000000000000000000072411314037446600213620ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #include // without this file, pclint will report errors and warnings on lines with uint32_t #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET, XS_RESTRICT, XS_WEAK_WRITE, XS_LINUX_WEAK_WRITE = 128 //дxenstore }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type {XS_WATCH_PATH = 0, XS_WATCH_TOKEN}; /* Inter-domain shared memory communications. 
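 *
 * The req/rsp arrays below are single-producer/single-consumer circular
 * buffers indexed by free-running counters. A producer works roughly as in
 * this sketch (illustrative only; intf, data and len are hypothetical, and
 * the required memory barriers are omitted):
 *
 *     struct xenstore_domain_interface *intf;
 *     XENSTORE_RING_IDX prod = intf->req_prod;
 *
 *     while (len != 0 && (prod - intf->req_cons) < XENSTORE_RING_SIZE) {
 *         intf->req[MASK_XENSTORE_IDX(prod++)] = *data++;
 *         len--;
 *     }
 *     intf->req_prod = prod;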
*/ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-monitor/iputils/000077500000000000000000000000001314037446600175735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/iputils/SNAPSHOT.h000066400000000000000000000000461314037446600212030ustar00rootroot00000000000000static char SNAPSHOT[] = "s20121221"; UVP-Tools-2.2.0.316/uvp-monitor/iputils/arping.c000066400000000000000000000631231314037446600212240ustar00rootroot00000000000000/* * arping.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, * YOSHIFUJI Hideaki */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CAPABILITIES #include #include #endif #include #include #include #include #include #include #include #include #ifdef USE_SYSFS #include struct sysfs_devattr_values; #endif #ifndef WITHOUT_IFADDRS #include #endif #ifdef USE_IDN #include #include #endif #include "SNAPSHOT.h" static void usage(void) __attribute__((noreturn)); #ifdef DEFAULT_DEVICE # define DEFAULT_DEVICE_STR DEFAULT_DEVICE #else # define DEFAULT_DEVICE NULL #endif struct device { char *name; int ifindex; #ifndef WITHOUT_IFADDRS struct ifaddrs *ifa; #endif #ifdef USE_SYSFS struct sysfs_devattr_values *sysfs; #endif }; int quit_on_reply=0; struct device device = { .name = DEFAULT_DEVICE, }; char *source; struct in_addr src, dst; char *target; int dad, unsolicited, advert; int quiet; int count=-1; int timeout; int unicasting; int s; int broadcast_only; struct sockaddr_storage me; struct sockaddr_storage he; struct timeval start, last; int sent, brd_sent; int received, brd_recv, req_recv; #ifndef CAPABILITIES static uid_t euid; #endif #define MS_TDIFF(tv1,tv2) ( ((tv1).tv_sec-(tv2).tv_sec)*1000 + \ ((tv1).tv_usec-(tv2).tv_usec)/1000 ) #define OFFSET_OF(name,ele) ((size_t)(((name *)0)->ele)) static inline socklen_t sll_len(size_t halen) { socklen_t len = OFFSET_OF(struct sockaddr_ll, sll_addr) + halen; if (len < sizeof(struct sockaddr_ll)) len = sizeof(struct sockaddr_ll); return len; } #define SLL_LEN(hln) sll_len(hln) void usage(void) { fprintf(stderr, "Usage: arping [-fqbDUAV] [-c count] [-w timeout] [-I device] [-s source] destination\n" " -f : quit on first reply\n" " -q : be quiet\n" " -b : keep broadcasting, don't go unicast\n" " -D : duplicate address detection mode\n" " -U : Unsolicited ARP mode, update your neighbours\n" " -A : ARP answer mode, update your neighbours\n" " -V : print version and exit\n" " -c count : how many packets to send\n" " -w timeout : how long to wait for a reply\n" " -I device : which ethernet device to use" #ifdef 
DEFAULT_DEVICE_STR " (" DEFAULT_DEVICE_STR ")" #endif "\n" " -s source : source ip address\n" " destination : ask for what ip address\n" ); exit(2); } void set_signal(int signo, void (*handler)(void)) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = (void (*)(int))handler; sa.sa_flags = SA_RESTART; sigaction(signo, &sa, NULL); } #ifdef CAPABILITIES static const cap_value_t caps[] = { CAP_NET_RAW, }; static cap_flag_value_t cap_raw = CAP_CLEAR; #endif void limit_capabilities(void) { #ifdef CAPABILITIES cap_t cap_p; cap_p = cap_get_proc(); if (!cap_p) { perror("arping: cap_get_proc"); exit(-1); } cap_get_flag(cap_p, CAP_NET_RAW, CAP_PERMITTED, &cap_raw); if (cap_raw != CAP_CLEAR) { if (cap_clear(cap_p) < 0) { perror("arping: cap_clear"); exit(-1); } cap_set_flag(cap_p, CAP_PERMITTED, 1, caps, CAP_SET); if (cap_set_proc(cap_p) < 0) { perror("arping: cap_set_proc"); if (errno != EPERM) exit(-1); } } if (prctl(PR_SET_KEEPCAPS, 1) < 0) { perror("arping: prctl"); exit(-1); } if (setuid(getuid()) < 0) { perror("arping: setuid"); exit(-1); } if (prctl(PR_SET_KEEPCAPS, 0) < 0) { perror("arping: prctl"); exit(-1); } cap_free(cap_p); #else euid = geteuid(); #endif } int modify_capability_raw(int on) { #ifdef CAPABILITIES cap_t cap_p; if (cap_raw != CAP_SET) return on ? -1 : 0; cap_p = cap_get_proc(); if (!cap_p) { perror("arping: cap_get_proc"); return -1; } cap_set_flag(cap_p, CAP_EFFECTIVE, 1, caps, on ? CAP_SET : CAP_CLEAR); if (cap_set_proc(cap_p) < 0) { perror("arping: cap_set_proc"); return -1; } cap_free(cap_p); #else if (setuid(on ? euid : getuid())) { perror("arping: setuid"); return -1; } #endif return 0; } static inline int enable_capability_raw(void) { return modify_capability_raw(1); } static inline int disable_capability_raw(void) { return modify_capability_raw(0); } void drop_capabilities(void) { #ifdef CAPABILITIES cap_t cap_p = cap_init(); if (!cap_p) { perror("arping: cap_init"); exit(-1); } if (cap_set_proc(cap_p) < 0) { perror("arping: cap_set_proc"); exit(-1); } cap_free(cap_p); #else if (setuid(getuid()) < 0) { perror("arping: setuid"); exit(-1); } #endif } int send_pack(int s, struct in_addr src, struct in_addr dst, struct sockaddr_ll *ME, struct sockaddr_ll *HE) { int err; struct timeval now; unsigned char buf[256]; struct arphdr *ah = (struct arphdr*)buf; unsigned char *p = (unsigned char *)(ah+1); ah->ar_hrd = htons(ME->sll_hatype); if (ah->ar_hrd == htons(ARPHRD_FDDI)) ah->ar_hrd = htons(ARPHRD_ETHER); ah->ar_pro = htons(ETH_P_IP); ah->ar_hln = ME->sll_halen; ah->ar_pln = 4; ah->ar_op = advert ? htons(ARPOP_REPLY) : htons(ARPOP_REQUEST); memcpy(p, &ME->sll_addr, ah->ar_hln); p+=ME->sll_halen; memcpy(p, &src, 4); p+=4; if (advert) memcpy(p, &ME->sll_addr, ah->ar_hln); else memcpy(p, &HE->sll_addr, ah->ar_hln); p+=ah->ar_hln; memcpy(p, &dst, 4); p+=4; gettimeofday(&now, NULL); err = sendto(s, buf, p-buf, 0, (struct sockaddr*)HE, SLL_LEN(ah->ar_hln)); if (err == p-buf) { last = now; sent++; if (!unicasting) brd_sent++; } return err; } void finish(void) { if (!quiet) { printf("Sent %d probes (%d broadcast(s))\n", sent, brd_sent); printf("Received %d response(s)", received); if (brd_recv || req_recv) { printf(" ("); if (req_recv) printf("%d request(s)", req_recv); if (brd_recv) printf("%s%d broadcast(s)", req_recv ? 
", " : "", brd_recv); printf(")"); } printf("\n"); fflush(stdout); } if (dad) exit(!!received); if (unsolicited) exit(0); exit(!received); } void catcher(void) { struct timeval tv, tv_s, tv_o; gettimeofday(&tv, NULL); if (start.tv_sec==0) start = tv; timersub(&tv, &start, &tv_s); tv_o.tv_sec = timeout; tv_o.tv_usec = 500 * 1000; if (count-- == 0 || (timeout && timercmp(&tv_s, &tv_o, >))) finish(); timersub(&tv, &last, &tv_s); tv_o.tv_sec = 0; if (last.tv_sec==0 || timercmp(&tv_s, &tv_o, >)) { send_pack(s, src, dst, (struct sockaddr_ll *)&me, (struct sockaddr_ll *)&he); if (count == 0 && unsolicited) finish(); } alarm(1); } void print_hex(unsigned char *p, int len) { int i; for (i=0; isll_pkttype != PACKET_HOST && FROM->sll_pkttype != PACKET_BROADCAST && FROM->sll_pkttype != PACKET_MULTICAST) return 0; /* Only these types are recognised */ if (ah->ar_op != htons(ARPOP_REQUEST) && ah->ar_op != htons(ARPOP_REPLY)) return 0; /* ARPHRD check and this darned FDDI hack here :-( */ if (ah->ar_hrd != htons(FROM->sll_hatype) && (FROM->sll_hatype != ARPHRD_FDDI || ah->ar_hrd != htons(ARPHRD_ETHER))) return 0; /* Protocol must be IP. */ if (ah->ar_pro != htons(ETH_P_IP)) return 0; if (ah->ar_pln != 4) return 0; if (ah->ar_hln != ((struct sockaddr_ll *)&me)->sll_halen) return 0; if (len < sizeof(*ah) + 2*(4 + ah->ar_hln)) return 0; memcpy(&src_ip, p+ah->ar_hln, 4); memcpy(&dst_ip, p+ah->ar_hln+4+ah->ar_hln, 4); if (!dad) { if (src_ip.s_addr != dst.s_addr) return 0; if (src.s_addr != dst_ip.s_addr) return 0; if (memcmp(p+ah->ar_hln+4, ((struct sockaddr_ll *)&me)->sll_addr, ah->ar_hln)) return 0; } else { /* DAD packet was: src_ip = 0 (or some src) src_hw = ME dst_ip = tested address dst_hw = We fail, if receive request/reply with: src_ip = tested_address src_hw != ME if src_ip in request was not zero, check also that it matches to dst_ip, otherwise dst_ip/dst_hw do not matter. */ if (src_ip.s_addr != dst.s_addr) return 0; if (memcmp(p, ((struct sockaddr_ll *)&me)->sll_addr, ((struct sockaddr_ll *)&me)->sll_halen) == 0) return 0; if (src.s_addr && src.s_addr != dst_ip.s_addr) return 0; } if (!quiet) { int s_printed = 0; printf("%s ", FROM->sll_pkttype==PACKET_HOST ? "Unicast" : "Broadcast"); printf("%s from ", ah->ar_op == htons(ARPOP_REPLY) ? 
"reply" : "request"); printf("%s [", inet_ntoa(src_ip)); print_hex(p, ah->ar_hln); printf("] "); if (dst_ip.s_addr != src.s_addr) { printf("for %s ", inet_ntoa(dst_ip)); s_printed = 1; } if (memcmp(p+ah->ar_hln+4, ((struct sockaddr_ll *)&me)->sll_addr, ah->ar_hln)) { if (!s_printed) printf("for "); printf("["); print_hex(p+ah->ar_hln+4, ah->ar_hln); printf("]"); } if (last.tv_sec) { long usecs = (tv.tv_sec-last.tv_sec) * 1000000 + tv.tv_usec-last.tv_usec; long msecs = (usecs+500)/1000; usecs -= msecs*1000 - 500; printf(" %ld.%03ldms\n", msecs, usecs); } else { printf(" UNSOLICITED?\n"); } fflush(stdout); } received++; if (FROM->sll_pkttype != PACKET_HOST) brd_recv++; if (ah->ar_op == htons(ARPOP_REQUEST)) req_recv++; if (quit_on_reply) finish(); if(!broadcast_only) { memcpy(((struct sockaddr_ll *)&he)->sll_addr, p, ((struct sockaddr_ll *)&me)->sll_halen); unicasting=1; } return 1; } #ifdef USE_SYSFS union sysfs_devattr_value { unsigned long ulong; void *ptr; }; enum { SYSFS_DEVATTR_IFINDEX, SYSFS_DEVATTR_FLAGS, SYSFS_DEVATTR_ADDR_LEN, SYSFS_DEVATTR_BROADCAST, SYSFS_DEVATTR_NUM }; struct sysfs_devattr_values { char *ifname; union sysfs_devattr_value value[SYSFS_DEVATTR_NUM]; }; static int sysfs_devattr_ulong_dec(char *ptr, struct sysfs_devattr_values *v, unsigned idx); static int sysfs_devattr_ulong_hex(char *ptr, struct sysfs_devattr_values *v, unsigned idx); static int sysfs_devattr_macaddr(char *ptr, struct sysfs_devattr_values *v, unsigned idx); struct sysfs_devattrs { const char *name; int (*handler)(char *ptr, struct sysfs_devattr_values *v, unsigned int idx); int free; } sysfs_devattrs[SYSFS_DEVATTR_NUM] = { [SYSFS_DEVATTR_IFINDEX] = { .name = "ifindex", .handler = sysfs_devattr_ulong_dec, }, [SYSFS_DEVATTR_ADDR_LEN] = { .name = "addr_len", .handler = sysfs_devattr_ulong_dec, }, [SYSFS_DEVATTR_FLAGS] = { .name = "flags", .handler = sysfs_devattr_ulong_hex, }, [SYSFS_DEVATTR_BROADCAST] = { .name = "broadcast", .handler = sysfs_devattr_macaddr, .free = 1, }, }; #endif /* * find_device() * * This function checks 1) if the device (if given) is okay for ARP, * or 2) find fist appropriate device on the system. * * Return value: * >0 : Succeeded, and appropriate device not found. * device.ifindex remains 0. * 0 : Succeeded, and approptiate device found. * device.ifindex is set. * <0 : Failed. Support not found, or other * : system error. Try other method. * * If an appropriate device found, it is recorded inside the * "device" variable for later reference. * * We have several implementations for this. * by_ifaddrs(): requires getifaddr() in glibc, and rtnetlink in * kernel. default and recommended for recent systems. * by_sysfs(): requires libsysfs , and sysfs in kernel. * by_ioctl(): unable to list devices without ipv4 address; this * means, you need to supply the device name for * DAD purpose. */ /* Common check for ifa->ifa_flags */ static int check_ifflags(unsigned int ifflags, int fatal) { if (!(ifflags & IFF_UP)) { if (fatal) { if (!quiet) printf("Interface \"%s\" is down\n", device.name); exit(2); } return -1; } if (ifflags & (IFF_NOARP | IFF_LOOPBACK)) { if (fatal) { if (!quiet) printf("Interface \"%s\" is not ARPable\n", device.name); exit(dad ? 
0 : 2); } return -1; } return 0; } static int find_device_by_ifaddrs(void) { #ifndef WITHOUT_IFADDRS int rc; struct ifaddrs *ifa0, *ifa; int count = 0; rc = getifaddrs(&ifa0); if (rc) { perror("getifaddrs"); return -1; } for (ifa = ifa0; ifa; ifa = ifa->ifa_next) { if (!ifa->ifa_addr) continue; if (ifa->ifa_addr->sa_family != AF_PACKET) continue; if (device.name && ifa->ifa_name && strcmp(ifa->ifa_name, device.name)) continue; if (check_ifflags(ifa->ifa_flags, device.name != NULL) < 0) continue; if (!((struct sockaddr_ll *)ifa->ifa_addr)->sll_halen) continue; if (!ifa->ifa_broadaddr) continue; device.ifa = ifa; if (count++) break; } if (count == 1 && device.ifa) { device.ifindex = if_nametoindex(device.ifa->ifa_name); if (!device.ifindex) { perror("arping: if_nametoindex"); freeifaddrs(ifa0); return -1; } device.name = device.ifa->ifa_name; return 0; } return 1; #else return -1; #endif } #ifdef USE_SYSFS static void sysfs_devattr_values_init(struct sysfs_devattr_values *v, int do_free) { int i; if (do_free) { free(v->ifname); for (i = 0; i < SYSFS_DEVATTR_NUM; i++) { if (sysfs_devattrs[i].free) free(v->value[i].ptr); } } memset(v, 0, sizeof(*v)); } static int sysfs_devattr_ulong(char *ptr, struct sysfs_devattr_values *v, unsigned int idx, unsigned int base) { unsigned long *p; char *ep; if (!ptr || !v) return -1; p = &v->value[idx].ulong; errno = 0; *p = strtoul(ptr, &ep, base); if ((*ptr && isspace(*ptr & 0xff)) || errno || (*ep != '\0' && *ep != '\n')) goto out; return 0; out: return -1; } static int sysfs_devattr_ulong_dec(char *ptr, struct sysfs_devattr_values *v, unsigned int idx) { int rc = sysfs_devattr_ulong(ptr, v, idx, 10); return rc; } static int sysfs_devattr_ulong_hex(char *ptr, struct sysfs_devattr_values *v, unsigned int idx) { int rc = sysfs_devattr_ulong(ptr, v, idx, 16); return rc; } static int sysfs_devattr_macaddr(char *ptr, struct sysfs_devattr_values *v, unsigned int idx) { unsigned char *m; int i; unsigned int addrlen; if (!ptr || !v) return -1; addrlen = v->value[SYSFS_DEVATTR_ADDR_LEN].ulong; m = malloc(addrlen); if (!m) { perror("malloc addrlen failed"); return -1; } memset(m, 0, addrlen); for (i = 0; i < addrlen; i++) { if (i && *(ptr + i * 3 - 1) != ':') goto out; if (sscanf(ptr + i * 3, "%02hhx", &m[i]) != 1) goto out; } v->value[idx].ptr = m; return 0; out: free(m); return -1; } #endif int find_device_by_sysfs(void) { int rc = -1; #ifdef USE_SYSFS struct sysfs_class *cls_net; struct dlist *dev_list; struct sysfs_class_device *dev; struct sysfs_attribute *dev_attr; struct sysfs_devattr_values sysfs_devattr_values; int count = 0; if (!device.sysfs) { device.sysfs = malloc(sizeof(*device.sysfs)); if (!device.sysfs) { perror("malloc device.sysfs failed"); return -1; } sysfs_devattr_values_init(device.sysfs, 0); } cls_net = sysfs_open_class("net"); if (!cls_net) { perror("sysfs_open_class"); return -1; } dev_list = sysfs_get_class_devices(cls_net); if (!dev_list) { perror("sysfs_get_class_devices"); goto out; } sysfs_devattr_values_init(&sysfs_devattr_values, 0); dlist_for_each_data(dev_list, dev, struct sysfs_class_device) { int i; int rc = -1; if (device.name && strcmp(dev->name, device.name)) goto do_next; sysfs_devattr_values_init(&sysfs_devattr_values, 1); for (i = 0; i < SYSFS_DEVATTR_NUM; i++) { dev_attr = sysfs_get_classdev_attr(dev, sysfs_devattrs[i].name); if (!dev_attr) { perror("sysfs_get_classdev_attr"); rc = -1; break; } if (sysfs_read_attribute(dev_attr)) { perror("sysfs_read_attribute"); rc = -1; break; } rc = 
sysfs_devattrs[i].handler(dev_attr->value, &sysfs_devattr_values, i); if (rc < 0) break; } if (rc < 0) goto do_next; if (check_ifflags(sysfs_devattr_values.value[SYSFS_DEVATTR_FLAGS].ulong, device.name != NULL) < 0) goto do_next; if (!sysfs_devattr_values.value[SYSFS_DEVATTR_ADDR_LEN].ulong) goto do_next; if (device.sysfs->value[SYSFS_DEVATTR_IFINDEX].ulong) { if (device.sysfs->value[SYSFS_DEVATTR_FLAGS].ulong & IFF_RUNNING) goto do_next; } sysfs_devattr_values.ifname = strdup(dev->name); if (!sysfs_devattr_values.ifname) { perror("malloc"); goto out; } sysfs_devattr_values_init(device.sysfs, 1); memcpy(device.sysfs, &sysfs_devattr_values, sizeof(*device.sysfs)); sysfs_devattr_values_init(&sysfs_devattr_values, 0); if (count++) break; continue; do_next: sysfs_devattr_values_init(&sysfs_devattr_values, 1); } if (count == 1) { device.ifindex = device.sysfs->value[SYSFS_DEVATTR_IFINDEX].ulong; device.name = device.sysfs->ifname; } rc = !device.ifindex; out: sysfs_close_class(cls_net); #endif return rc; } static int check_device_by_ioctl(int s, struct ifreq *ifr) { if (ioctl(s, SIOCGIFFLAGS, ifr) < 0) { perror("ioctl(SIOCGIFINDEX"); return -1; } if (check_ifflags(ifr->ifr_flags, device.name != NULL) < 0) return 1; if (ioctl(s, SIOCGIFINDEX, ifr) < 0) { perror("ioctl(SIOCGIFINDEX"); return -1; } return 0; } static int find_device_by_ioctl(void) { int s; struct ifreq *ifr0 = NULL; struct ifreq *ifr, *ifr_end; size_t ifrsize = sizeof(*ifr); struct ifconf ifc; static struct ifreq ifrbuf; int count = 0; s = socket(AF_INET, SOCK_DGRAM, 0); if (s < 0) { perror("socket"); return -1; } memset(&ifrbuf, 0, sizeof(ifrbuf)); if (device.name) { strncpy(ifrbuf.ifr_name, device.name, sizeof(ifrbuf.ifr_name) - 1); if (check_device_by_ioctl(s, &ifrbuf)) goto out; count++; } else { do { int rc; ifr0 = malloc(ifrsize); if (!ifr0) { perror("malloc"); goto out; } ifc.ifc_buf = (char *)ifr0; ifc.ifc_len = ifrsize; rc = ioctl(s, SIOCGIFCONF, &ifc); if (rc < 0) { perror("ioctl(SIOCFIFCONF"); free(ifr0); ifr0 = NULL; goto out; } if (ifc.ifc_len + sizeof(*ifr0) + sizeof(struct sockaddr_storage) - sizeof(struct sockaddr) <= ifrsize) break; ifrsize *= 2; free(ifr0); ifr0 = NULL; } while(ifrsize < INT_MAX / 2); if (!ifr0) { fprintf(stderr, "arping: too many interfaces!?\n"); goto out; } ifr_end = (struct ifreq *)(((char *)ifr0) + ifc.ifc_len - sizeof(*ifr0)); for (ifr = ifr0; ifr <= ifr_end; ifr++) { if (check_device_by_ioctl(s, &ifrbuf)) continue; memcpy(&ifrbuf.ifr_name, ifr->ifr_name, sizeof(ifrbuf.ifr_name)); if (count++) break; } free(ifr0); ifr0 = NULL; } close(s); if (count == 1) { device.ifindex = ifrbuf.ifr_ifindex; device.name = ifrbuf.ifr_name; } return !device.ifindex; out: close(s); return -1; } static int find_device(void) { int rc; rc = find_device_by_ifaddrs(); if (rc >= 0) goto out; rc = find_device_by_sysfs(); if (rc >= 0) goto out; rc = find_device_by_ioctl(); out: return rc; } /* * set_device_broadcast() * * This fills the device "broadcast address" * based on information found by find_device() funcion. 
*/ static int set_device_broadcast_ifaddrs_one(struct device *device, unsigned char *ba, size_t balen, int fatal) { #ifndef WITHOUT_IFADDRS struct ifaddrs *ifa; struct sockaddr_ll *sll; if (!device) return -1; ifa = device->ifa; if (!ifa) return -1; sll = (struct sockaddr_ll *)ifa->ifa_broadaddr; if (sll->sll_halen != balen) { if (fatal) { if (!quiet) printf("Address length does not match...\n"); exit(2); } return -1; } memcpy(ba, sll->sll_addr, sll->sll_halen); return 0; #else return -1; #endif } int set_device_broadcast_sysfs(struct device *device, unsigned char *ba, size_t balen) { #ifdef USE_SYSFS struct sysfs_devattr_values *v; if (!device) return -1; v = device->sysfs; if (!v) return -1; if (v->value[SYSFS_DEVATTR_ADDR_LEN].ulong != balen) return -1; memcpy(ba, v->value[SYSFS_DEVATTR_BROADCAST].ptr, balen); return 0; #else return -1; #endif } static int set_device_broadcast_fallback(struct device *device, unsigned char *ba, size_t balen) { if (!quiet) fprintf(stderr, "WARNING: using default broadcast address.\n"); memset(ba, -1, balen); return 0; } static void set_device_broadcast(struct device *dev, unsigned char *ba, size_t balen) { if (!set_device_broadcast_ifaddrs_one(dev, ba, balen, 0)) return; if (!set_device_broadcast_sysfs(dev, ba, balen)) return; set_device_broadcast_fallback(dev, ba, balen); } void setsock() { struct sockaddr_in saddr; int probe_fd = socket(AF_INET, SOCK_DGRAM, 0); if (probe_fd < 0) { perror("socket"); exit(2); } if (device.name) { enable_capability_raw(); if (setsockopt(probe_fd, SOL_SOCKET, SO_BINDTODEVICE, device.name, strlen(device.name)+1) == -1) perror("WARNING: interface is ignored"); disable_capability_raw(); } memset(&saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; if (src.s_addr) { saddr.sin_addr = src; if (bind(probe_fd, (struct sockaddr*)&saddr, sizeof(saddr)) == -1) { perror("bind"); exit(2); } } else if (!dad) { int on = 1; socklen_t alen = sizeof(saddr); saddr.sin_port = htons(1025); saddr.sin_addr = dst; if (setsockopt(probe_fd, SOL_SOCKET, SO_DONTROUTE, (char*)&on, sizeof(on)) == -1) perror("WARNING: setsockopt(SO_DONTROUTE)"); if (connect(probe_fd, (struct sockaddr*)&saddr, sizeof(saddr)) == -1) { perror("connect"); exit(2); } if (getsockname(probe_fd, (struct sockaddr*)&saddr, &alen) == -1) { perror("getsockname"); exit(2); } src = saddr.sin_addr; } close(probe_fd); } int main(int argc, char **argv) { int socket_errno; int ch; limit_capabilities(); #ifdef USE_IDN setlocale(LC_ALL, ""); #endif enable_capability_raw(); s = socket(PF_PACKET, SOCK_DGRAM, 0); socket_errno = errno; disable_capability_raw(); while ((ch = getopt(argc, argv, "h?bfDUAqc:w:s:I:V")) != EOF) { switch(ch) { case 'b': broadcast_only=1; break; case 'D': dad++; quit_on_reply=1; break; case 'U': unsolicited++; break; case 'A': advert++; unsolicited++; break; case 'q': quiet++; break; case 'c': count = atoi(optarg); break; case 'w': timeout = atoi(optarg); break; case 'I': device.name = optarg; break; case 'f': quit_on_reply=1; break; case 's': source = optarg; break; case 'V': printf("arping utility, iputils-%s\n", SNAPSHOT); exit(0); case 'h': case '?': default: usage(); } } argc -= optind; argv += optind; if (argc != 1) usage(); target = *argv; if (device.name && !*device.name) device.name = NULL; if (s < 0) { errno = socket_errno; perror("arping: socket"); exit(2); } if (find_device() < 0) exit(2); if (!device.ifindex) { if (device.name) { fprintf(stderr, "arping: Device %s not available.\n", device.name); exit(2); } fprintf(stderr, "arping: device (option -I) is 
required.\n"); usage(); } if (inet_aton(target, &dst) != 1) { struct hostent *hp; char *idn = target; #ifdef USE_IDN int rc; rc = idna_to_ascii_lz(target, &idn, 0); if (rc != IDNA_SUCCESS) { fprintf(stderr, "arping: IDN encoding failed: %s\n", idna_strerror(rc)); exit(2); } #endif hp = gethostbyname2(idn, AF_INET); if (!hp) { fprintf(stderr, "arping: unknown host %s\n", target); exit(2); } #ifdef USE_IDN free(idn); #endif memcpy(&dst, hp->h_addr, 4); } if (source && inet_aton(source, &src) != 1) { fprintf(stderr, "arping: invalid source %s\n", source); exit(2); } if (!dad && unsolicited && src.s_addr == 0) src = dst; if (!dad || src.s_addr) { setsock(); }; ((struct sockaddr_ll *)&me)->sll_family = AF_PACKET; ((struct sockaddr_ll *)&me)->sll_ifindex = device.ifindex; ((struct sockaddr_ll *)&me)->sll_protocol = htons(ETH_P_ARP); if (bind(s, (struct sockaddr*)&me, sizeof(me)) == -1) { perror("bind"); exit(2); } if (1) { socklen_t alen = sizeof(me); if (getsockname(s, (struct sockaddr*)&me, &alen) == -1) { perror("getsockname"); exit(2); } } if (((struct sockaddr_ll *)&me)->sll_halen == 0) { if (!quiet) printf("Interface \"%s\" is not ARPable (no ll address)\n", device.name); exit(dad?0:2); } he = me; set_device_broadcast(&device, ((struct sockaddr_ll *)&he)->sll_addr, ((struct sockaddr_ll *)&he)->sll_halen); if (!quiet) { printf("ARPING %s ", inet_ntoa(dst)); printf("from %s %s\n", inet_ntoa(src), device.name ? : ""); } if (!src.s_addr && !dad) { fprintf(stderr, "arping: no source address in not-DAD mode\n"); exit(2); } drop_capabilities(); set_signal(SIGINT, finish); set_signal(SIGALRM, catcher); catcher(); while(1) { sigset_t sset, osset; unsigned char packet[4096]; struct sockaddr_storage from; socklen_t alen = sizeof(from); int cc; if ((cc = recvfrom(s, packet, sizeof(packet), 0, (struct sockaddr *)&from, &alen)) < 0) { perror("arping: recvfrom"); continue; } sigemptyset(&sset); sigaddset(&sset, SIGALRM); sigaddset(&sset, SIGINT); sigprocmask(SIG_BLOCK, &sset, &osset); recv_pack(packet, cc, (struct sockaddr_ll *)&from); sigprocmask(SIG_SETMASK, &osset, NULL); } } UVP-Tools-2.2.0.316/uvp-monitor/iputils/ndsend.c000066400000000000000000000064711314037446600212220ustar00rootroot00000000000000/* * Copyright (C) 2000-2008, Parallels, Inc. All rights reserved. * * This code is public domain. * * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
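 *
 * ndsend builds one ICMPv6 Neighbor Advertisement with the "override" flag
 * set and a target link-layer address option carrying the interface MAC,
 * then sends it from a raw ICMPv6 socket to the all-nodes multicast address
 * ff02::1 with a hop limit of 255, so that neighbours refresh their cache
 * entries for the announced IPv6 address.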
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define EXC_OK 0 #define EXC_USAGE 1 #define EXC_SYS 2 #define EXC_RECV 3 #define EXC_NORECV 4 #define NAME "ndsend: " char* iface = NULL; struct in6_addr src_ipaddr; int ifindex; __u8 real_hwaddr[6]; struct nd_packet { __u8 icmp6_type; __u8 icmp6_code; __u16 icmp6_cksum; __u32 reserved:5, override:1, solicited:1, router:1, reserved2:24; struct in6_addr target; __u8 otype; __u8 ospace; __u8 obits[6]; }; static int init_device_addresses(int sock, const char* device) { struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); strncpy(ifr.ifr_name, device, IFNAMSIZ-1); if (ioctl(sock, SIOCGIFINDEX, &ifr) != 0) { fprintf(stderr, NAME "Unknown network interface %s: %m\n", device); return -1; } ifindex = ifr.ifr_ifindex; /* get interface HW address */ if (ioctl(sock, SIOCGIFHWADDR, &ifr) != 0) { fprintf(stderr, NAME "Can not get the MAC address of " "network interface %s: %m\n", device); return -1; } memcpy(real_hwaddr, ifr.ifr_hwaddr.sa_data, 6); return 0; } int sock; struct nd_packet pkt; static void create_nd_packet(struct nd_packet* pkt) { pkt->icmp6_type = ND_NEIGHBOR_ADVERT; pkt->icmp6_code = 0; pkt->icmp6_cksum = 0; pkt->reserved = 0; pkt->override = 1; pkt->solicited = 0; pkt->router = 0; pkt->reserved = 0; memcpy(&pkt->target, &src_ipaddr, 16); pkt->otype = ND_OPT_TARGET_LINKADDR; pkt->ospace = 1; memcpy(&pkt->obits, real_hwaddr, 6); } static void sender(void) { struct sockaddr_in6 to; to.sin6_family = AF_INET6; to.sin6_port = 0; ((__u32*)&to.sin6_addr)[0] = htonl(0xFF020000); ((__u32*)&to.sin6_addr)[1] = 0; ((__u32*)&to.sin6_addr)[2] = 0; ((__u32*)&to.sin6_addr)[3] = htonl(0x1); to.sin6_scope_id = ifindex; if (sendto(sock, &pkt, sizeof(pkt), 0, (struct sockaddr*) &to, sizeof(to)) < 0) { fprintf(stderr, NAME "Error in sendto(): %m\n"); exit(EXC_SYS); } } static void usage() { printf( "ndsend sends an unsolicited Neighbor Advertisement ICMPv6 multicast packet\n" "announcing a given IPv6 address to all IPv6 nodes as per RFC4861.\n\n" "Usage: ndsend
(example: ndsend 2001:DB8::1 eth0)\n\n" "Note: ndsend is called by arpsend -U -i, see arpsend(8) man page.\n" ); } int main(int argc,char** argv) { int value; if (argc != 3) { usage(); exit(EXC_USAGE); } if (inet_pton(AF_INET6, argv[1], &src_ipaddr) <= 0) return -1; iface = argv[2]; sock = socket(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6); if (sock < 0) { fprintf(stderr, NAME "Error in socket(): %m\n"); exit(EXC_SYS); } if (init_device_addresses(sock, iface) < 0) exit(EXC_SYS); value = 255; setsockopt (sock, SOL_IPV6, IPV6_MULTICAST_HOPS, &value, sizeof (value)); create_nd_packet(&pkt); sender(); exit(EXC_OK); } UVP-Tools-2.2.0.316/uvp-monitor/main.c000066400000000000000000000041301314037446600171700ustar00rootroot00000000000000/* * main.c * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include "xenstore_common.h" #include "public_common.h" /***************************************************************************** Function : main Description: main function Input :None Output : None Return : 0 *****************************************************************************/ int main(int argc, char **argv) { pid_t proc_id = fork(); if(proc_id == 0) { DEBUG_LOG("Start uvp monitor service."); start_service(); } else { if(proc_id < 0) DEBUG_LOG("Start uvp monitor service fail!"); proc_id > 0 ? exit(0) : exit(1); } return 0; } UVP-Tools-2.2.0.316/uvp-monitor/memory.c000066400000000000000000000214041314037446600175570ustar00rootroot00000000000000/* * Obtains the memory capacity and usage, and writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "libxenctl.h" #include "securec.h" #include "uvpmon.h" #include #define PROC_MEMINFO "/proc/meminfo" #define MEM_DATA_PATH "control/uvp/memory" #define SWAP_MEM_DATA_PATH "control/uvp/mem_swap" #define TMP_BUFFER_SIZE 255 #define DECIMAL 10 #define SHELL_BUFFER 256 int is_suse() { FILE *pF = NULL; char strIssue[SHELL_BUFFER] = {0}; if(0 == access("/etc/SuSE-release", R_OK)) { return 1; } pF = fopen("/etc/issue", "r"); if (NULL == pF) { ERR_LOG("[Monitor-Upgrade]: open /etc/issue fail."); return 0; } while(fgets(strIssue, SHELL_BUFFER-1, pF) != NULL) { if (strstr(strIssue, "SUSE")) { fclose(pF); return 1; } } fclose(pF); return 0; } /***************************************************************************** Function : GetMMUseRatio Description: memory Input :meminfoļ· Input :char size Output :˳򱣴ڴڴڴʹڴbuffer ڴcacheԼswapswapʹswapλKB Return : memory *****************************************************************************/ int GetMMUseRatio(const char *mem_file, char *meminfo_buf, int size, char *swap_meminfo_buf) { FILE *file; char tmp_buffer[TMP_BUFFER_SIZE + 1]; char *start = NULL; char *start_swap = NULL; int MemAvailable_flag = 0; unsigned long ulMemTotal = 0L; unsigned long ulMemFree = 0L; unsigned long ulMemAvailable = 0L; unsigned long ulMemBuffers = 0L; unsigned long ulMemCached = 0L; unsigned long ulMemSwapCached = 0L; unsigned long ulMemSReclaimable = 0L; unsigned long ulMemNFSUnstable = 0L; unsigned long ulSwapTotal = 0L; unsigned long ulSwapFree = 0L; int iRetLen = 0; file = fopen(mem_file, "r"); if (NULL == file) { // LogPrint("Unable to open %s, errno: %d\n", PROC_MEMINFO, errno); (void)strncpy_s(meminfo_buf, size+1, ERR_STR, size); (void)strncpy_s(swap_meminfo_buf, size+1, ERR_STR, size); meminfo_buf[size] = '\0'; swap_meminfo_buf[size] = '\0'; return ERROR; //lint -save -e438 } while (NULL != fgets(tmp_buffer, TMP_BUFFER_SIZE, file)) { /*get total memory*/ start = strstr(tmp_buffer, "MemTotal:"); if ( NULL != start ) { start = start + strlen("MemTotal:"); /*lMemTotal = atol(start);*/ ulMemTotal = strtoul(start, NULL, DECIMAL); } /*get free memory*/ start = strstr(tmp_buffer, "MemFree:"); if ( NULL != start ) { start = start + 
strlen("MemFree:"); /*lMemFree = atol(start);*/ ulMemFree = strtoul(start, NULL, DECIMAL); } /*get MemAvailable memory*/ start = strstr(tmp_buffer, "MemAvailable:"); if ( NULL != start ) { start = start + strlen("MemAvailable:"); /*lMemFree = atol(start);*/ ulMemAvailable = strtoul(start, NULL, DECIMAL); MemAvailable_flag = 1; } /*get buffers memory*/ start = strstr(tmp_buffer, "Buffers:"); if ( NULL != start ) { start = start + strlen("Buffers:"); ulMemBuffers = strtoul(start, NULL, DECIMAL); } /*get cached memory*/ start = strstr(tmp_buffer, "Cached:"); if ( NULL != start ) { if(0 == strncmp(tmp_buffer, "Cached:", 7)) { start = start + strlen("Cached:"); ulMemCached = strtoul(start, NULL, DECIMAL); } } /*get SwapCached memory*/ start = strstr(tmp_buffer, "SwapCached:"); if ( NULL != start ) { start = start + strlen("SwapCached:"); ulMemSwapCached = strtoul(start, NULL, DECIMAL); } /*get swap total*/ start = strstr(tmp_buffer, "SwapTotal:"); if ( NULL != start ) { start = start + strlen("SwapTotal:"); ulSwapTotal = strtoul(start, NULL, DECIMAL); } /*get swap free*/ start = strstr(tmp_buffer, "SwapFree:"); if ( NULL != start ) { start = start + strlen("SwapFree:"); ulSwapFree = strtoul(start, NULL, DECIMAL); } /*get SReclaiable memory*/ start = strstr(tmp_buffer, "SReclaimable:"); if ( NULL != start ) { start = start + strlen("SReclaimable:"); ulMemSReclaimable = strtoul(start, NULL, DECIMAL); } /*get NFSUnstable memory*/ start = strstr(tmp_buffer, "NFS_Unstable:"); if ( NULL != start ) { start = start + strlen("NFS_Unstable:"); ulMemNFSUnstable = strtoul(start, NULL, DECIMAL); break; } } (void)fclose(file); if(MemAvailable_flag) { iRetLen = snprintf_s(meminfo_buf, size - 1, size - 1, "%lu:%lu:%lu:%lu:%lu", ulMemAvailable, ulMemTotal, ulMemTotal - ulMemAvailable, ulMemBuffers, ulMemCached); } else { if(is_suse()) { iRetLen = snprintf_s(meminfo_buf, size - 1, size - 1, "%lu:%lu:%lu:%lu:%lu", ulMemFree + ulMemBuffers + ulMemCached + ulMemSwapCached + ulMemSReclaimable + ulMemNFSUnstable, ulMemTotal, ulMemTotal - ulMemFree, ulMemBuffers, ulMemCached); } else { iRetLen = snprintf_s(meminfo_buf, size - 1, size - 1, "%lu:%lu:%lu:%lu:%lu", ulMemFree + ulMemBuffers + ulMemCached, ulMemTotal, ulMemTotal - ulMemFree, ulMemBuffers, ulMemCached); } } meminfo_buf[iRetLen] = '\0'; iRetLen = snprintf_s(swap_meminfo_buf, size - 1, size - 1, "%lu:%lu:%lu", ulSwapTotal, ulSwapTotal - ulSwapFree, ulSwapFree); swap_meminfo_buf[iRetLen] = '\0'; return SUCC; } /***************************************************************************** Function : memoryworkctlmon Description: get󣬰freeֽ + ":"+ totalֽд뵽xenstore Input :handle : handle of xenstore Output : None Return : ʧ:-1ɹ:0 *****************************************************************************/ int memoryworkctlmon(struct xs_handle *handle) { char tmp_buffer[TMP_BUFFER_SIZE + 1]; char tmp_swap_buffer[TMP_BUFFER_SIZE + 1]; if (NULL == handle) { return -1; } (void)GetMMUseRatio(PROC_MEMINFO, tmp_buffer, TMP_BUFFER_SIZE, tmp_swap_buffer); if(xb_write_first_flag == 0) { write_to_xenstore(handle, MEM_DATA_PATH, tmp_buffer); write_to_xenstore(handle, SWAP_MEM_DATA_PATH, tmp_swap_buffer); } else { write_weak_to_xenstore(handle, MEM_DATA_PATH, tmp_buffer); write_weak_to_xenstore(handle, SWAP_MEM_DATA_PATH, tmp_swap_buffer); } //write_to_xenstore(handle, MEM_DATA_PATH, tmp_buffer); return 0; } 
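/*
 * Minimal standalone sketch (added for illustration, not part of the original
 * tree): it shows the /proc/meminfo parsing pattern GetMMUseRatio() relies on
 * above -- read the file line by line, match a field name, and convert the kB
 * value with strtoul().  The helper name meminfo_value() is illustrative, and
 * the "free:total:used" output is a simplified subset of the five-field
 * string memoryworkctlmon() writes to xenstore.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long meminfo_value(const char *key)
{
    FILE *fp = fopen("/proc/meminfo", "r");
    char line[256];
    unsigned long val = 0;

    if (fp == NULL)
        return 0;
    while (fgets(line, sizeof(line), fp) != NULL) {
        if (strncmp(line, key, strlen(key)) == 0) {
            /* the value follows "Key:" and is expressed in kB */
            val = strtoul(line + strlen(key), NULL, 10);
            break;
        }
    }
    fclose(fp);
    return val;
}

int main(void)
{
    unsigned long total = meminfo_value("MemTotal:");
    unsigned long freem = meminfo_value("MemFree:");

    printf("%lu:%lu:%lu\n", freem, total, total - freem);
    return 0;
}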
UVP-Tools-2.2.0.316/uvp-monitor/monitor/000077500000000000000000000000001314037446600175715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/monitor/uvp-monitor000066400000000000000000000115321314037446600220150ustar00rootroot00000000000000#!/bin/sh # # UVP Monitor. # chkconfig: 35 35 65 # description: uvp monitor # ### BEGIN INIT INFO # Provides: uvp-monitor # Required-Start: # Should-Start: $network $remote_fs # Required-Stop: # Should-Stop: $network $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Description: uvp monitor ### END INIT INFO PATH=$PATH:/bin:/sbin:/usr/sbin:/usr/bin BINARY='/usr/bin/uvp-monitor' KERN_RELEASE="$(uname -r)" UVP_MODULES_PATH="/lib/modules/$KERN_RELEASE/updates/pvdriver" Info='eval 2>&1 logger "[uvp-monitor:$FUNCNAME:$LINENO]"' ############################################################################### SYSTEM=unknown PIDFILE=$(basename "$BINARY") if [ -f /etc/redhat-release -o -n "$(grep -i 'GreatTurbo' /etc/issue)" ] then SYSTEM=redhat PIDFILE="/var/lock/subsys/$PIDFILE" elif [ -f /etc/SuSE-release ] then SYSTEM=suse PIDFILE="/var/run/$PIDFILE" elif [ -f /etc/debian_version ] then SYSTEM=debian PIDFILE="/var/run/$PIDFILE" else SYSTEM=other if [ -d /var/run -a -w /var/run ] then PIDFILE="/var/run/$PIDFILE" fi fi ############################################################################### if [ "$SYSTEM" = "redhat" ] then . /etc/init.d/functions msg_failed() { echo_failure 2>&1 echo } msg_success() { echo_success 2>&1 echo } elif [ "$SYSTEM" = "suse" ] then . /etc/rc.status daemon() { startproc ${1+"$@"} } msg_failed() { rc_failed 1 2>&1 rc_status -v 2>&1 } msg_success() { rc_reset 2>&1 rc_status -v 2>&1 } elif [ "$SYSTEM" = "debian" ] then daemon() { start-stop-daemon --start --exec $1 -- $2 } killproc() { start-stop-daemon --stop --retry 2 --exec $@ } msg_failed() { echo " ...fail." } msg_success() { echo " ...done." } fi ############################################################################### check_running() { if which pidof 2>&1 | : then PIDOF='pidof' else PIDOF=':' fi if [ -f "${PIDFILE}" -o -n "$($PIDOF $BINARY)" ] then return 0 else return 1 fi } StartService() { $Info "Starting uvp-monitor service " if ! check_running then ### if [ ! -x "$BINARY" ] then $Info "Cannot run $BINARY" msg_failed return 1 fi daemon $BINARY > /dev/null RETVAL=$? if [ $RETVAL -eq 0 ] then echo `pidof $BINARY` > $PIDFILE msg_success return 0 else $Info "Start uvp monitor fail" msg_failed return 1 fi else $Info "- Already started" msg_success return 0 fi } StopService() { $Info "Stopping uvp-monitor service " if check_running then # wait for uvptools upgrading trap 'echo 1>&2 ; exit 0' INT TERM QUIT HUP trap '-' TERM for pid in $(pidof "$BINARY") do kill -15 $pid > /dev/null 2>&1 sleep 1 break done killproc "$BINARY" ; rm -f "${PIDFILE}" rm -f "$PIDFILE" if ! check_running then msg_success return 0 else $Info "Stop uvp monitor fail" msg_failed return 1 fi else $Info "- Already stoped" msg_success return 0 fi } RestartService() { ret=0 StopService ret=$? i=0 while [ "$i" -lt 5 ] do i=$((i+1)) if check_running then sleep 5 $Info "Stop uvp-monitor fail, try again" StopService ret=$? fi done StartService ret=$? i=0 while [ "$i" -lt 5 ] do i=$((i+1)) if ! check_running then sleep 5 $Info "Start uvp-monitor fail, try again" StartService ret=$? fi done return $ret } ServiceStatus() { echo -n "Checking for uvp-monitor" if check_running then echo " running..." else echo " not running... 
" fi return 0 } ############################################################################### case "$1" in start) StartService retv=$? i=0 while [ "$i" -lt 5 ] do i=$((i+1)) if ! check_running then sleep 5 $Info "Start uvp-monitor fail, try again" StartService retv=$? fi done ;; stop) StopService retv=$? ;; restart) RestartService retv=$? ;; status) ServiceStatus retv=$? ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 esac exit $retv UVP-Tools-2.2.0.316/uvp-monitor/monitor/uvp-monitor-gentoo000066400000000000000000000053501314037446600233070ustar00rootroot00000000000000#!/sbin/runscript # # UVP Monitor. depend() { after * } UVP_CONFDIR=${UVP_CONFDIR:-/etc/.uvp-monitor} UVP_PIDFILE=${UVP_PIDFILE:-/var/run/${SVCNAME}.pid} UVP_BINARY=${UVP_BINARY:-/usr/bin/uvp-monitor} Info='eval 2>&1 logger "[uvp-monitor]"' msg_failed() { echo " ...fail." } msg_success() { echo " ...done." } check_running() { if [ -f "${UVP_PIDFILE}" -o -n "$(pidof $UVP_BINARY)" ] then return 0 else return 1 fi } start() { echo -n "Starting uvp-monitor service " $Info "Starting uvp-monitor service " if ! check_running then modprobe xen_hcall >/dev/null 2>&1 modprobe xen_procfs >/dev/null 2>&1 if [ ! -x "$UVP_BINARY" ] then $Info "Cannot run $BINARY" msg_failed return 1 fi start-stop-daemon --start --exec $UVP_BINARY --pidfile "${UVP_PIDFILE}" > /dev/null RETVAL=$? if [ $RETVAL -eq 0 ] then echo `pidof $UVP_BINARY` > $UVP_PIDFILE msg_success return 0 else $Info "Start uvp monitor fail" msg_failed return 1 fi else $Info "- Already started" msg_success return 0 fi } stop() { echo -n "Stopping uvp-monitor service " $Info "Stopping uvp-monitor service " if check_running then # wait for uvptools upgrading trap 'echo 1>&2 ; exit 0' INT TERM QUIT HUP timeout=300 while [ -d "/tmp/uvptools_temp" -a $timeout -gt 0 ] do echo -n '.' 1>&2 ; sleep 1 timeout=$((timeout-1)) done rm -fr /tmp/uvptools_temp trap '-' TERM for pid in "$(pidof "$UVP_BINARY")" do kill -9 $pid > /dev/null 2>&1 sleep 1 break done rm -f "$UVP_PIDFILE" if ! check_running then msg_success return 0 else $Info "Stop uvp monitor fail" msg_failed return 1 fi else $Info "- Already stoped" msg_success return 0 fi } restart() { stop i=0 while [ "$i" -lt 5 ] do i=$((i+1)) if check_running then sleep 5 $Info "Stop uvp-monitor fail, try again" stop fi done start i=0 while [ "$i" -lt 5 ] do i=$((i+1)) if ! check_running then sleep 5 $Info "Start uvp-monitor fail, try again" start ret=$? fi done return $ret } status() { echo -n "Checking for uvp-monitor" if check_running then echo " running..." else echo " not running... " fi return 0 } UVP-Tools-2.2.0.316/uvp-monitor/monitor/xenvnet-arp000066400000000000000000000010261314037446600217620ustar00rootroot00000000000000#!/bin/bash ETHDEV=$(ls /sys/class/net/) for j in $(seq 1 3) do for i in $ETHDEV do for IP in $(/sbin/ip addr show $i | awk '/inet /{split($2,x,"/");print x[1]}') do if [ ! -z $IP ] then /etc/.uvp-monitor/arping -U -c 1 -I $i $IP >/dev/null /etc/.uvp-monitor/arping -A -c 1 -I $i $IP >/dev/null fi done for IP in $(/sbin/ip addr show $i | awk '/inet6 /{split($2,x,"/");print x[1]}') do if [ ! -z $IP ] then /etc/.uvp-monitor/ndsend $IP $i >/dev/null fi done done sleep 0.5 done UVP-Tools-2.2.0.316/uvp-monitor/netinfo.c000066400000000000000000001051521314037446600177140ustar00rootroot00000000000000/* * Obtains the IP addr the number of packets received or send by a NIC, and * writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "libxenctl.h" #include "public_common.h" #include "securec.h" #include #include #include #define UPFLAG 1 #define DOWNFLAG 0 #define MAX_NICINFO_LENGTH 256 #define MAX_COMMAND_LENGTH 128 #define VIF_NAME_LENGTH 16 #define MAC_NAME_LENGTH 18 #define PACKAGE_LENGTH 31 //define for ipv4/6 info #define XENSTORE_COUNT 6 #define XENSTORE_LEN 1024 #define NETINFO_PATH_LEN 30 #define BONDBRIDGE_PATH_LEN 64 #define IPADDR_LEN 4 /*can get 128 ip, but Result: you should filter remain 36 ip*/ #define IPV6_NIC_MAX 128 #define IPV6_ADDR_LEN 40 #define IPV6_FLUX_LEN 64 #define IPV6_IPNUM 35 #define IPV6_ADDR_LINKLOCAL 0x0020U #define IPV6_ADDR_SITELOCAL 0x0040U #define IPV6_ADDR_COMPATv4 0x0080U #ifndef IN6_IS_ADDR_UNSPECIFIED #define IN6_IS_ADDR_UNSPECIFIED(a) \ (((__u32 *) (a))[0] == 0 && ((__u32 *) (a))[1] == 0 && \ ((__u32 *) (a))[2] == 0 && ((__u32 *) (a))[3] == 0) #endif char ArrRetNet[XENSTORE_COUNT][XENSTORE_LEN] = {0}; //end define //added by huanglingling for ipv4/6 info typedef struct { char ifname[VIF_NAME_LENGTH]; char ipaddr[IPV6_ADDR_LEN]; char mac[MAC_NAME_LENGTH]; char tp[IPV6_FLUX_LEN]; char packs[IPV6_FLUX_LEN]; char gateway[IPV6_ADDR_LEN]; long sentdrop; long recievedrop; /*interface status*/ int netstatusflag; /*if has ip info, then mark ipv4: 4, ipv6: 6*/ int ipversionflag; } IPV6_VIF_DATA; typedef struct { int count; IPV6_VIF_DATA info[IPV6_NIC_MAX]; } IPV6_VIF_INFO; /*all ipv4/6 info value*/ IPV6_VIF_INFO gtNicIpv6Info; /*filter ipv4/6 info Result value*/ IPV6_VIF_INFO gtNicIpv6InfoResult; //added end extern FILE *opendevfile(const char *path); extern int openNetSocket(void); extern void NetworkDestroy(int sktd); extern char *GetVifName(char **namep, char *p); extern int GetVifFlag(int skt, const char *ifname); extern int getVifData(char *pline, int nNumber, char *out); extern FILE *openPipe(const char *pszCommand, const char *pszType); extern int getFluxinfoLine(char *ifname, char *pline); /***************************************************************************** Function : GetIpv6NetFlag Description: get interface flag Input : None Output : None Return : *****************************************************************************/ void 
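/*
 * netstatusflag values set by GetIpv6NetFlag() and reported through
 * NetinfoNetworkctlmon(): 1 = plain NIC up, 2 = bonding master up,
 * 3 = VLAN interface up, 4 = bridge up, 0 = plain NIC down,
 * 9 = bond/VLAN/bridge down.
 */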
GetIpv6NetFlag(int skt,char *vifname) { char *vlan_path = "/proc/net/vlan/config"; char bond_path[BONDBRIDGE_PATH_LEN] = {0}; char bridge_path[BONDBRIDGE_PATH_LEN] = {0}; char namebuf[VIF_NAME_LENGTH] = {0}; char *vifnamebuf = NULL; int vlan_flag = 0; int vlanfile_flag = 1; int bond_flag = 0; int bridge_flag = 0; int vifnamelen = 0; FILE *file = NULL; char *begin = NULL; char pline[128] = {0}; int i = 0; if (NULL == vifname) { return ; } vifnamebuf = vifname; (void)memset_s(namebuf,VIF_NAME_LENGTH,0,VIF_NAME_LENGTH); /*if has bond0:0 br0:0 muti ip Etc,you should get bond0 br0 name*/ if(NULL != strstr(vifname, ":")) { while(*vifname != ':') { namebuf[i++] = *vifname; vifname++; } namebuf[i] = '\0'; vifnamebuf = namebuf; } /*vlan*/ if(!access(vlan_path,0)) { if(NULL == (file = opendevfile(vlan_path))) { DEBUG_LOG("Failed to open /proc/net/vlan/config."); vlanfile_flag = 0; } vifnamelen = strlen(vifnamebuf); if(1 == vlanfile_flag) { /*if has vlan, then query /proc/net/vlan/config to get interface name*/ /*lint -e668 */ while (fgets(pline, 128, file)) { begin = pline; /*skip blank*/ while (' ' == *begin) begin++; /*if find matched name,then vlan_flag = 1 and break*/ if (0 == strncmp(begin, vifnamebuf, vifnamelen)) { if (' ' == *(begin + vifnamelen) ) { vlan_flag = 1; break; } } /*init pline 0*/ memset_s(pline, 128, 0, 128); } /*lint +e668 */ fclose(file); } } /*assemble bond path*/ snprintf_s(bond_path,BONDBRIDGE_PATH_LEN,BONDBRIDGE_PATH_LEN,"/proc/net/bonding/%s",vifnamebuf); if(!access(bond_path,0)) { bond_flag = 1; } /*assemble bridge path*/ snprintf_s(bridge_path,BONDBRIDGE_PATH_LEN,BONDBRIDGE_PATH_LEN,"/sys/class/net/%s/bridge",vifnamebuf); if(!access(bridge_path,0)) { bridge_flag = 1; } /*if has vlan and status is down, then vlan_flag=2*/ if(vlan_flag == 1 && DOWNFLAG == GetVifFlag(skt, vifnamebuf)) { vlan_flag = 2; } if(UPFLAG == GetVifFlag(skt, vifnamebuf)) { /*status: bond upflag:2, vlan upflag:3, normal NIC upflag:1,bridge upflag:4*/ if(1 == bond_flag) { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 2; } else if(1 == vlan_flag) { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 3; } else if(1 == bridge_flag) { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 4; } else { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 1; } } else { /*status: bond/vlan/bridge downflag:9, normal NIC downflag:0*/ if(1 == bond_flag || vlan_flag == 2 || 1 == bridge_flag) { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 9; } else { gtNicIpv6Info.info[gtNicIpv6Info.count].netstatusflag = 0; } } } /***************************************************************************** Function : GetIpv6VifMac Description: get Ipv4/6 mac address Input : None Output : None Return : *****************************************************************************/ void GetIpv6VifMac(int skt, const char *ifname) { struct ifreq ifrequest; if (NULL == ifname) { return ; } strncpy_s(ifrequest.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if ( ! 
(ioctl(skt, SIOCGIFHWADDR, (char *) &ifrequest) ) ) { (void)snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].mac, sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].mac), sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].mac), "%02x:%02x:%02x:%02x:%02x:%02x", (unsigned char)ifrequest.ifr_hwaddr.sa_data[0], (unsigned char)ifrequest.ifr_hwaddr.sa_data[1], (unsigned char)ifrequest.ifr_hwaddr.sa_data[2], (unsigned char)ifrequest.ifr_hwaddr.sa_data[3], (unsigned char)ifrequest.ifr_hwaddr.sa_data[4], (unsigned char)ifrequest.ifr_hwaddr.sa_data[5]); } return ; } /***************************************************************************** Function : GetIpv6Flux Description: get Ipv4/6 Flux Input : None Output : None Return : *****************************************************************************/ int GetIpv6Flux(int skt,char *ifname) { char Sent[PACKAGE_LENGTH] = {0}; char Recived[PACKAGE_LENGTH] = {0}; char SentPkt[PACKAGE_LENGTH] = {0}; char RecivedPkt[PACKAGE_LENGTH] = {0}; char SentPktDrop[PACKAGE_LENGTH] = {0}; char RecivedPktDrop[PACKAGE_LENGTH] = {0}; char *ptmp = NULL; /*get flux info line*/ char fluxline[255] = {0}; /* get vifname info line*/ char *vifline = NULL; size_t linelen = 0; char *foundStr = NULL; FILE *file; char *path="/proc/net/dev"; char *namebuf = NULL; char *p = NULL; int ifnamelen = 0; if(NULL == (file = opendevfile(path))) { DEBUG_LOG("Failed to open /proc/net/dev."); return ERROR; } if (NULL == ifname)//for pclint warning { DEBUG_LOG("ifname is NULL."); fclose(file); return ERROR; } /*multi ip: such as eth0:0, you should match eth0 flux*/ if (getline(&vifline, &linelen, file) == -1 /* eat line */ || getline(&vifline, &linelen, file) == -1) {} while (getline(&vifline, &linelen, file) != -1) { (void)GetVifName(&namebuf, vifline); ifnamelen = strlen(namebuf); /*consider this scene: such as ifname = "eth10:0" namebuf="eth1" and ifname = "eth1:0" namebuf="eth1"*/ if(NULL != strstr(ifname,namebuf) && NULL != strstr(ifname,":")) { p = ifname + ifnamelen; if(':' == *p) { ifname = namebuf; break; } } } /*get flux line*/ if (ERROR == getFluxinfoLine(ifname, fluxline)) { strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].tp, IPV6_FLUX_LEN, ERR_STR, strlen(ERR_STR)); gtNicIpv6Info.info[gtNicIpv6Info.count].tp[strlen(ERR_STR)] = '\0'; DEBUG_LOG("getFluxinfoLine failed, ifname=%s.", ifname); fclose(file); return ERROR; } if(0 == strlen(fluxline)) { DEBUG_LOG("Line is NULL."); fclose(file); return ERROR; } foundStr = strstr(fluxline, ifname); if (NULL == foundStr) { DEBUG_LOG("foundStr is NULL, fluxline=%s, ifname=%s.", fluxline, ifname); fclose(file); return ERROR; } /*get first data*/ ptmp = foundStr + strlen(ifname) + 1; if (NULL == ptmp) { DEBUG_LOG("ptmp is NULL."); fclose(file); return ERROR; } /*get each column data info*/ if(ERROR == getVifData(ptmp, 1, Recived) || ERROR == getVifData(ptmp, 2, RecivedPkt) || ERROR == getVifData(ptmp, 4, RecivedPktDrop) || ERROR == getVifData(ptmp, 9, Sent) || ERROR == getVifData(ptmp, 10, SentPkt) || ERROR == getVifData(ptmp, 12, SentPktDrop)) { DEBUG_LOG("getVifData is ERROR."); fclose(file); return ERROR; } (void)snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].tp, sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].tp), sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].tp), "%s:%s", Sent, Recived); gtNicIpv6Info.info[gtNicIpv6Info.count].tp[strlen(Sent) + strlen(Recived) + 1] = '\0'; (void)snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].packs, sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].packs), 
sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].packs), "%s:%s", SentPkt, RecivedPkt); gtNicIpv6Info.info[gtNicIpv6Info.count].packs[strlen(SentPkt) + strlen(RecivedPkt) + 1] = '\0'; /*networkloss*/ (void)sscanf_s(SentPktDrop,"%ld", >NicIpv6Info.info[gtNicIpv6Info.count].sentdrop); (void)sscanf_s(RecivedPktDrop,"%ld",>NicIpv6Info.info[gtNicIpv6Info.count].recievedrop); fclose(file); free(vifline); return SUCC; } /***************************************************************************** Function : CheckName Description: get Ipv4/6 Gateway Input : const char *name Output : None Return : *****************************************************************************/ int CheckName(const char *name) { if(NULL == name) { return ERROR; } if(strstr(name, "<") || strstr(name, ">") || strstr(name, "\\") || strstr(name, "/") || strstr(name, "&") || strstr(name, "|") || strstr(name, ";") || strstr(name, "$") || strstr(name, "?") || strstr(name, ",") || strstr(name, "*") || strstr(name, "{") || strstr(name, "}") || strstr(name, "`") || strstr(name, "'") || strstr(name, "\"")) { return ERROR; } return SUCC; } /***************************************************************************** Function : GetiIpv6VifGateway Description: get Ipv4/6 Gateway Input : None Output : None Return : *****************************************************************************/ int GetIpv4VifGateway(int skt, const char *ifname) { char pathBuf[MAX_NICINFO_LENGTH] = {0}; char pszGateway[VIF_NAME_LENGTH] = {0}; char pszGatewayBuf[VIF_NAME_LENGTH] = {0}; FILE *iRet; if(NULL == ifname) { return ERROR; } if(SUCC != CheckName(ifname)) { return ERROR; } (void)memset_s(pathBuf, MAX_NICINFO_LENGTH, 0, MAX_NICINFO_LENGTH); /*exec shell command to get ipv4 route gataway info*/ (void)snprintf_s(pathBuf, MAX_NICINFO_LENGTH, MAX_NICINFO_LENGTH, "route -n | grep -i \"%s$\" | grep UG | awk '{print $2}'", ifname); iRet = openPipe(pathBuf, "r"); if (NULL == iRet) { DEBUG_LOG("Failed to exec route shell command, pathBuf=%s.", pathBuf); gtNicIpv6Info.info[gtNicIpv6Info.count].gateway[0] = '\0'; return ERROR; } /*save default gw*/ if(NULL != fgets(pszGatewayBuf,sizeof(pszGatewayBuf),iRet)) { (void)sscanf_s(pszGatewayBuf,"%s",pszGateway,sizeof(pszGateway)); } trim(pszGateway); /*if strlen(pszGateway) < 1, then 0*/ if(strlen(pszGateway) < 1) { pszGateway[0]='0'; pszGateway[1]='\0'; } (void)pclose(iRet); (void)strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].gateway, IPV6_ADDR_LEN, pszGateway, strlen(pszGateway)); return SUCC; } int GetIpv6VifGateway(int skt, const char *ifname) { char pathBuf[MAX_NICINFO_LENGTH] = {0}; char pszGateway[VIF_NAME_LENGTH] = {0}; char pszGatewayBuf[VIF_NAME_LENGTH] = {0}; FILE *iRet; if(NULL == ifname) { return ERROR; } if(SUCC != CheckName(ifname)) { return ERROR; } (void)memset_s(pathBuf, MAX_NICINFO_LENGTH, 0, MAX_NICINFO_LENGTH); /*exec shell command to get ipv6 route gataway info*/ (void)snprintf_s(pathBuf, MAX_NICINFO_LENGTH, MAX_NICINFO_LENGTH, "route -A inet6 -n | grep -w \"%s\" | grep UG | awk '{print $2}'", ifname); iRet = openPipe(pathBuf, "r"); if (NULL == iRet) { INFO_LOG("Failed to execute route shell command, pathBuf=%s.", pathBuf); gtNicIpv6Info.info[gtNicIpv6Info.count].gateway[0] = '\0'; return ERROR; } /*save default gw*/ if(NULL != fgets(pszGatewayBuf,sizeof(pszGatewayBuf),iRet)) { (void)sscanf_s(pszGatewayBuf,"%s",pszGateway,sizeof(pszGateway)); } trim(pszGateway); /*if strlen(pszGateway) < 1, then 0*/ if(strlen(pszGateway) < 1) { pszGateway[0]='0'; pszGateway[1]='\0'; } (void)pclose(iRet); 
(void)strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].gateway, IPV6_ADDR_LEN, pszGateway, strlen(pszGateway)); return SUCC; } /***************************************************************************** Function : GetIpv4VifIp Description: get Ipv4 ip info Input : None Output : None Return : *****************************************************************************/ int GetIpv4VifIp(int skt, int num, char *ifname) { struct ifaddrs *ifaddr, *ifa; int ret; char address[IPV6_ADDR_LEN]; if (getifaddrs(&ifaddr) == -1) { ERR_LOG("Call getifaddrs failed, errno=%d", errno); return ERROR; } for (ifa = ifaddr; ifa; ifa = ifa->ifa_next) { if (NULL == ifa->ifa_addr) continue; if( AF_INET != ifa->ifa_addr->sa_family) continue; ret = getnameinfo(ifa->ifa_addr,sizeof(struct sockaddr_in),address, IPV6_ADDR_LEN, NULL, 0, NI_NUMERICHOST); if (0 != ret) { ERR_LOG("Call getnameinfo for nic %s failed: %s.", ifname, gai_strerror(ret)); continue; } if((NULL != ifa->ifa_name) && (strcmp(ifa->ifa_name,ifname) == 0) && (AF_INET == ifa->ifa_addr->sa_family)) { snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].ifname, VIF_NAME_LENGTH, VIF_NAME_LENGTH, "%s", ifname); GetIpv6VifMac(skt,ifname); gtNicIpv6Info.info[gtNicIpv6Info.count].ipversionflag = 4; snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].ipaddr, IPV6_ADDR_LEN, IPV6_ADDR_LEN, "%s", address); GetIpv4VifGateway(skt, ifname); GetIpv6Flux(skt,ifname); GetIpv6NetFlag(skt,ifname); num++; gtNicIpv6Info.count = num; } } freeifaddrs(ifaddr); return gtNicIpv6Info.count; } /***************************************************************************** Function : Inet6Rresolve Description: get abbreviation Ipv6 info Input : None Output : None Return : int *****************************************************************************/ int Inet6Rresolve(char *name, struct sockaddr_in6 *sin6, int numeric) { int s; /* Grmpf. 
-FvK */ if (sin6->sin6_family != AF_INET6) { errno = EAFNOSUPPORT; return -1; } if (numeric & 0x7FFF) { inet_ntop(AF_INET6, &sin6->sin6_addr, name, 80); return 0; } if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { if (numeric & 0x8000) { strncpy_s(name, strlen("default")+1, "default", strlen("default")); } else { strncpy_s(name, strlen("*")+1, "*", strlen("*")); } return 0; } s = getnameinfo((struct sockaddr *) sin6, sizeof(struct sockaddr_in6),name, 255 , NULL, 0, 0); if (s) { DEBUG_LOG("getnameinfo failed, name=%s, s=%s", name, s); return -1; } return 0; } /***************************************************************************** Function : Inet6Sprint Description: get abbreviation Ipv6 info Input : None Output : None Return : buf *****************************************************************************/ char *Inet6Sprint(struct sockaddr *sap, int numeric) { static char buff[128] = {0}; (void)Inet6Rresolve(buff, (struct sockaddr_in6 *) sap, numeric); return buff; } /***************************************************************************** Function : Inet6Resolve Description: resolve ipv6addr Input : None Output : None Return : int *****************************************************************************/ int Inet6Resolve(char *name, struct sockaddr_in6 *sin6) { struct addrinfo req; struct addrinfo *ai; int s; memset_s (&req, sizeof(struct addrinfo), '\0', sizeof(struct addrinfo)); req.ai_family = AF_INET6; if ((s = getaddrinfo(name, NULL, &req, &ai))) { INFO_LOG("getaddrinfo: %s: %d", name, s); return -1; } memcpy_s(sin6, sizeof(struct sockaddr_in6), ai->ai_addr, sizeof(struct sockaddr_in6)); freeaddrinfo(ai); return 0; } /***************************************************************************** Function : Inet6Input Description: return INET6_resolve result Input : None Output : None Return : int *****************************************************************************/ int Inet6Input(int type, char *bufp, struct sockaddr *sap) { return (Inet6Resolve(bufp, (struct sockaddr_in6 *) sap)); } /***************************************************************************** Function : GetIpv6VifIp Description: get Ipv6 ip info Input : None Output : None Return : *****************************************************************************/ int GetIpv6VifIp(int skt,int num, char *vifname) { char *path="/proc/net/if_inet6"; FILE *file; char addr6p[8][5] = {0}; char devname[20] = {0}; char *line = NULL; char addr6[40] = {0}; size_t linelen = 0; int plen, scope, dad_status, if_idx; struct sockaddr_in6 sap; char *abbreviationIpv6 = NULL; if(NULL == vifname) { return ERROR; } /*if no support ipv6, return error*/ if(0 != access(path,F_OK)) { return ERROR; } if(NULL == (file = opendevfile(path))) { DEBUG_LOG("Failed to open /proc/net/if_inet6."); return ERROR; } /*query /proc/net/if_inet6 to get ipv6 info*/ while(getline(&line, &linelen, file) != -1) { (void)memset_s(devname,20,0,20); sscanf_s(line, "%4s%4s%4s%4s%4s%4s%4s%4s %02x %02x %02x %02x %20s\n", addr6p[0], sizeof(addr6p[0]), addr6p[1], sizeof(addr6p[1]), addr6p[2], sizeof(addr6p[2]), addr6p[3], sizeof(addr6p[3]), addr6p[4], sizeof(addr6p[4]), addr6p[5], sizeof(addr6p[5]), addr6p[6], sizeof(addr6p[6]), addr6p[7], sizeof(addr6p[7]), &if_idx, &plen, &scope, &dad_status, devname, sizeof(devname)); /*do not modify strcmp -> strncmp*/ if(0 == strcmp(devname,vifname) && (scope == 0 || scope == IPV6_ADDR_LINKLOCAL || scope == IPV6_ADDR_SITELOCAL || scope == IPV6_ADDR_COMPATv4)) { 
snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].ifname, VIF_NAME_LENGTH, VIF_NAME_LENGTH, "%s", vifname); snprintf_s(addr6, IPV6_ADDR_LEN, IPV6_ADDR_LEN, "%s:%s:%s:%s:%s:%s:%s:%s", addr6p[0], addr6p[1], addr6p[2], addr6p[3], addr6p[4], addr6p[5], addr6p[6], addr6p[7]); /*abbreviation Ipv6 info*/ Inet6Input(1, addr6, (struct sockaddr *) &sap); abbreviationIpv6 = Inet6Sprint((struct sockaddr *) &sap, 1); snprintf_s(gtNicIpv6Info.info[gtNicIpv6Info.count].ipaddr, IPV6_ADDR_LEN, IPV6_ADDR_LEN, "%s", abbreviationIpv6); GetIpv6VifMac(skt,vifname); /*gw info has switch function*/ if(g_exinfo_flag_value & EXINFO_FLAG_GATEWAY) { GetIpv6VifGateway(skt,vifname); } else { (void)strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].gateway, IPV6_ADDR_LEN, "0", strlen("0")); } gtNicIpv6Info.info[gtNicIpv6Info.count].ipversionflag = 6; GetIpv6Flux(skt,vifname); GetIpv6NetFlag(skt,vifname); num++; gtNicIpv6Info.count = num; } } fclose(file); free(line); return gtNicIpv6Info.count; } /***************************************************************************** Function : GetIpv6Info Description: get Ipv4/6 info Input : None Output : None Return : *****************************************************************************/ int GetIpv6Info() { struct ifconf ifconfigure; struct ifreq *ifreqIdx; struct ifreq *ifreq; char *path="/proc/net/dev"; FILE *file; char *line = NULL; size_t linelen = 0; char namebuf[16] = {0}; char buf[4096] = {0}; int uNICCount = 0; int num = 0; int i = 0; int novifnameFlag = 0; int lastcount = 0; int getipv6flag = 0; int vifnameLen = 0; int skt; ifconfigure.ifc_len = 4096; ifconfigure.ifc_buf = buf; skt = openNetSocket(); if (ERROR == skt) { DEBUG_LOG("Failed to openNetSocket."); return ERROR; } memset_s(>NicIpv6Info, sizeof(gtNicIpv6Info), 0, sizeof(gtNicIpv6Info)); /*ioctl interface: interface has ipv4 and its status is up*/ if(!ioctl(skt, SIOCGIFCONF, (char *) &ifconfigure)) { ifreq = (struct ifreq *)buf; uNICCount = ifconfigure.ifc_len / (int)sizeof(struct ifreq); for(i=0; iifr_name); for(j = 0; num > 0 && j < num; j++) { /*do not modify strcmp -> strncmp*/ if(0 == strcmp(namebuf, gtNicIpv6Info.info[j].ifname)) { nicRepeatFlag = 1; break; } } vifnameLen = strlen(namebuf); if(nicRepeatFlag || 0 == strncmp(namebuf,"lo",vifnameLen) || 0 == strncmp(namebuf,"sit0",vifnameLen)) { continue; } /*get ipv4 info */ if(UPFLAG == GetVifFlag(skt, namebuf)) { getipv6flag = GetIpv4VifIp(skt,num,namebuf); if(ERROR != getipv6flag) { num = getipv6flag; } if(num >= IPV6_NIC_MAX) break; } /*get ipv6 info, /proc/net/if_inet6: the interface status is up */ getipv6flag = GetIpv6VifIp(skt,num,namebuf); if(ERROR != getipv6flag) { num = getipv6flag; } if(num >= IPV6_NIC_MAX) break; } } /*patch interface by query /proc/net/dev*/ if(NULL == (file = opendevfile(path))) { NetworkDestroy(skt); DEBUG_LOG("Failed to open /proc/net/dev."); return ERROR; } if(getline(&line, &linelen, file) == -1 /* eat line */ || getline(&line, &linelen, file) == -1) {} while (getline(&line, &linelen, file) != -1) { novifnameFlag = 0; char *vifname; (void)GetVifName(&vifname, line); vifnameLen = strlen(vifname); if(0 == strncmp(vifname,"lo",vifnameLen) || 0== strncmp(vifname,"sit0",vifnameLen)) { continue; } for(i=0;i strncmp*/ if(0 == strcmp(vifname,gtNicIpv6Info.info[i].ifname)) { /*has exist vifname*/ novifnameFlag = 1; } } if(num >= IPV6_NIC_MAX) break; /*new vifname*/ if(novifnameFlag == 0) { /*if interface status is down, then xenstore info only has mac and its status*/ if(DOWNFLAG == GetVifFlag(skt,vifname)) { num++; 
GetIpv6VifMac(skt, vifname); GetIpv6NetFlag(skt,vifname); gtNicIpv6Info.count=num; } else { /*if interface status is up and no ip, then xenstore info has mac,staus,ip(none),gw,flux*/ /*if interface status is up and has ipv6 info,then xenstore info has mac,staus,ip6,gw,flux*/ lastcount = gtNicIpv6Info.count; getipv6flag = GetIpv6VifIp(skt,num,vifname); if(ERROR != getipv6flag) { num = getipv6flag; } if(num == lastcount) { num++; GetIpv6VifMac(skt, vifname); gtNicIpv6Info.info[gtNicIpv6Info.count].gateway[0] = '\0'; (void)strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].ipaddr, IPV6_ADDR_LEN, "none", strlen("none")); gtNicIpv6Info.info[gtNicIpv6Info.count].ipaddr[sizeof(gtNicIpv6Info.info[gtNicIpv6Info.count].ipaddr)-1]='\0'; GetIpv6Flux(skt,vifname); if(g_exinfo_flag_value & EXINFO_FLAG_GATEWAY) { GetIpv4VifGateway(skt,vifname); } else { (void)strncpy_s(gtNicIpv6Info.info[gtNicIpv6Info.count].gateway, IPV6_ADDR_LEN, "0", strlen("0")); } GetIpv6NetFlag(skt,vifname); gtNicIpv6Info.count =num; } else { gtNicIpv6Info.count =num; } } } } fclose(file); free(line); NetworkDestroy(skt); return gtNicIpv6Info.count; } /***************************************************************************** Function : Ipv6PrintInfo Description: print Ipv4/6 info Input : None Output : None Return : *****************************************************************************/ void Ipv6PrintInfo(void * handle) { unsigned int i = 0; char vif_path[NETINFO_PATH_LEN] = {0}; /*xenstore print info*/ for (i = 0; i < XENSTORE_COUNT; i++) { memset_s(vif_path,NETINFO_PATH_LEN,0,NETINFO_PATH_LEN); /*assemble xenstore path*/ if(i == 0) { (void)snprintf_s(vif_path, NETINFO_PATH_LEN, NETINFO_PATH_LEN, "%s", IPV6_VIF_DATA_PATH); } else { (void)snprintf_s(vif_path, NETINFO_PATH_LEN, NETINFO_PATH_LEN, "%s_%u", IPV6_VIF_DATA_PATH, i); } if(xb_write_first_flag == 0) { (void)write_to_xenstore(handle, vif_path, ArrRetNet[i]); } else { (void)write_weak_to_xenstore(handle, vif_path, ArrRetNet[i]); } } } /***************************************************************************** Function : NetinfoNetworkctlmon Description: ip4/6 main entry Input : None Output : None Return : *****************************************************************************/ void NetinfoNetworkctlmon(void *handle) { char NetworkLoss[32] = {0}; int i = 0; int num = 0; int count = 0; int BuffLen = 0; long sumrecievedrop = 0; long sumsentdrop = 0; num = GetIpv6Info(); memset_s(ArrRetNet,sizeof(ArrRetNet),0,sizeof(ArrRetNet)); /*init ArrRetNet*/ for(i=0;iIPV6_IPNUM ) break; } for(i=0;iIPV6_IPNUM ) break; if(IPADDR_LEN >= strlen(gtNicIpv6Info.info[i].ipaddr)) { (void)memcpy_s(>NicIpv6InfoResult.info[gtNicIpv6InfoResult.count], sizeof(IPV6_VIF_DATA), >NicIpv6Info.info[i], sizeof(IPV6_VIF_DATA)); gtNicIpv6InfoResult.count++; } } /*assemble array*/ for(i=0;i-<%s>]", gtNicIpv6InfoResult.info[i].mac, gtNicIpv6InfoResult.info[i].netstatusflag, gtNicIpv6InfoResult.info[i].ipaddr, trim(gtNicIpv6InfoResult.info[i].gateway), gtNicIpv6InfoResult.info[i].tp, gtNicIpv6InfoResult.info[i].packs); } else { (void)snprintf_s(ArrRetNet[count] + BuffLen, sizeof(ArrRetNet[count]) - BuffLen, sizeof(ArrRetNet[count]) - BuffLen, "[%s-%d]", gtNicIpv6InfoResult.info[i].mac, gtNicIpv6InfoResult.info[i].netstatusflag); } } Ipv6PrintInfo(handle); if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS) { (void)snprintf_s(NetworkLoss, sizeof(NetworkLoss), sizeof(NetworkLoss), "%ld:%ld",sumrecievedrop, sumsentdrop); } if(xb_write_first_flag == 0) { if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS) { 
write_to_xenstore(handle, VIF_DROP_PATH, NetworkLoss); } } else { if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS) { write_weak_to_xenstore(handle, VIF_DROP_PATH, NetworkLoss); } } return ; } UVP-Tools-2.2.0.316/uvp-monitor/network.c000066400000000000000000001634451314037446600177540ustar00rootroot00000000000000/* * Obtains the IP addr the number of packets received or send by a NIC, and * writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
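 *
 * networkctlmon() below reports each NIC that is up as
 * [mac-1-ip-gateway-<txBytes:rxBytes>-<txPkts:rxPkts>] and each NIC that is
 * down as [mac-0]; the first seven records are written to VIF_DATA_PATH and
 * any further records to VIFEXTRA_DATA_PATH, while the summed rx/tx drop
 * counters go to VIF_DROP_PATH when the network-loss extra-info flag is set.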
*/ #include "libxenctl.h" #include "public_common.h" #include "securec.h" #include #include "uvpmon.h" #include #define NIC_MAX 15 #define NORMAL_NIC "fe:ff:ff" #define VIF_MAX 7 #define UPFLAG 1 #define DOWNFLAG 0 #define MAX_NICINFO_LENGTH 256 #define MAX_COMMAND_LENGTH 128 typedef struct { char ifname[16]; char mac[18]; char ip[16]; char tp[64]; char packs[64]; char gateway[16]; int GmnExFlag; long sentdrop; long recievedrop; } VIF_DATA; typedef struct { int count; VIF_DATA info[NIC_MAX]; } VIF_INFO; VIF_INFO gtNicInfo; #define MAC_NAME_LENGTH 18 #define VIF_NAME_LENGTH 16 VIF_INFO gtNicInfo_bond; typedef struct { char vifname[VIF_NAME_LENGTH]; char mac[MAC_NAME_LENGTH]; char sriov_vifname[VIF_NAME_LENGTH]; char sriov_mac[MAC_NAME_LENGTH]; int release_count; char bondname[VIF_NAME_LENGTH]; } VIF_BOUND_DATA; typedef struct { int count; VIF_BOUND_DATA info[NIC_MAX]; } VIF_BOUND_INFO; VIF_BOUND_INFO BoundInfo; extern int uvpPopen(const char *pszCmd, char *pszBuffer, int size); extern FILE *openPipe(const char *pszCommand, const char *pszType); extern int CheckName(const char * name); /***************************************************************************** Function : opendevfile Description: open the dev file Input :None Output : None Return : fd *****************************************************************************/ FILE *opendevfile(const char *path) { if (NULL == path) { return NULL; } return fopen(path, "r"); } /***************************************************************************** Function : NetworkDestroy Description: network model destroy Input : sktd : socket Output : None Return : None *****************************************************************************/ void NetworkDestroy(int sktd) { if (ERROR != sktd) { close(sktd); } } /***************************************************************************** Function : GetVifName Description: get the vif name Input : namep p Output : None Return : p Remark : ifconfig ԴĴ룬ץȡ豸 *****************************************************************************/ char *GetVifName(char **namep, char *p) { int count = 0; while (isspace(*p)) p++; char *name = *namep = p; while (*p) { if (isspace(*p)) break; if (*p == ':') { /* could be an alias */ char *dot = p, *dotname = name; *name++ = *p++; count++; while (isdigit(*p)){ *name++ = *p++; count++; if (count == (IFNAMSIZ-1)) break; } if (*p != ':') { /* it wasn't, backup */ p = dot; name = dotname; } if (*p == '\0') return NULL; p++; break; } *name++ = *p++; count++; if (count == (IFNAMSIZ-1)) break; } *name++ = '\0'; return p; } /***************************************************************************** +Function : GetVifGateway +Description: get the NIC gateway address +Input :ifname -- the NIC name +Output : None +Return : success : return gateway address, fail : return error or disconnected +*****************************************************************************/ int GetVifGateway(int skt, const char *ifname) { char pathBuf[MAX_NICINFO_LENGTH] = {0}; char pszGateway[VIF_NAME_LENGTH] = {0}; char pszGatewayBuf[VIF_NAME_LENGTH] = {0}; FILE *iRet; (void)memset_s(pathBuf, MAX_NICINFO_LENGTH, 0, MAX_NICINFO_LENGTH); if(SUCC != CheckName(ifname)) { return ERROR; } /*ƴʱṩshellͨroute -nȡϢ*/ (void)snprintf_s(pathBuf, MAX_NICINFO_LENGTH, MAX_NICINFO_LENGTH, "route -n | grep -i \"%s$\" | grep UG | awk '{print $2}'", ifname); iRet = openPipe(pathBuf, "r"); if (NULL == iRet) { DEBUG_LOG("Failed to exec route shell command."); gtNicInfo.info[gtNicInfo.count].gateway[0] = '\0'; 
return ERROR; } /*ȡϢ*/ if(NULL != fgets(pszGatewayBuf,sizeof(pszGatewayBuf),iRet)) { (void)sscanf_s(pszGatewayBuf,"%s",pszGateway,sizeof(pszGateway)); } trim(pszGateway); /*ûϢ0*/ if(strlen(pszGateway) < 1) { pszGateway[0]='0'; pszGateway[1]='\0'; } (void)pclose(iRet); (void)strncpy_s(gtNicInfo.info[gtNicInfo.count].gateway, 16, pszGateway, strlen(pszGateway)); return SUCC; } /***************************************************************************** Function : GetVifFlag Description: get the NIC flags Input : ifname -- the NIC name Output : None Return : SUCC or ERROR *****************************************************************************/ int GetVifFlag(int skt, const char *ifname) { struct ifreq ifreq; if (NULL == ifname) { return ERROR; } (void)strncpy_s(ifreq.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1); ifreq.ifr_name[IFNAMSIZ - 1] = '\0'; if (!(ioctl(skt, SIOCGIFFLAGS, (char *) &ifreq))) { /* ж״̬ */ if (ifreq.ifr_flags & IFF_UP) { return UPFLAG; } } return DOWNFLAG; } /***************************************************************************** Function : GetVifIp Description: get the NIC ip address Input :ifname -- the NIC name Output : None Return : success : return ip address, fail : return error or disconnected *****************************************************************************/ int GetVifIp(int skt, const char *ifname) { struct ifreq ifrequest; struct sockaddr_in *pAddr; (void)strncpy_s(ifrequest.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if (!(ioctl(skt, SIOCGIFFLAGS, (char *) &ifrequest))) { /* ж״̬ */ if (ifrequest.ifr_flags & IFF_UP) { if ( ! (ioctl(skt, SIOCGIFADDR, (char *) &ifrequest) ) ) { pAddr = (struct sockaddr_in *) (&ifrequest.ifr_addr); strncpy_s(gtNicInfo.info[gtNicInfo.count].ip, 16, inet_ntoa( (pAddr->sin_addr ) ), strlen(inet_ntoa( (pAddr->sin_addr ) ))); return SUCC; } } } return ERROR; } /***************************************************************************** Function : GetFlux Description: get the NIC ThroughPut Input :ifname -- the NIC name Output : None Return : success : return ThroughPut string, fail : return error or disconnected *****************************************************************************/ int GetFlux(int skt, char *ifname) { char Sent[31] = {0}; char Recived[31] = {0}; char SentPkt[31] = {0}; char RecivedPkt[31] = {0}; char SentPktDrop[31] = {0}; char RecivedPktDrop[31] = {0}; char *ptmp = NULL; char line[255] = {0}; char *foundStr = NULL; if (NULL == ifname)//for pclint warning { DEBUG_LOG("ifname is NULL."); return ERROR; } if (ERROR == getFluxinfoLine(ifname, line)) { strncpy_s(gtNicInfo.info[gtNicInfo.count].tp, 64, ERR_STR, strlen(ERR_STR)); gtNicInfo.info[gtNicInfo.count].tp[strlen(ERR_STR)] = '\0'; DEBUG_LOG("getFluxinfoLine is ERROR."); return ERROR; } if(0 == strlen(line)) { DEBUG_LOG("Line is NULL."); return ERROR; } foundStr = strstr(line, ifname); if (NULL == foundStr) { DEBUG_LOG("foundStr is NULL, line=%s, ifname=%s.", line, ifname); return ERROR; } /*ҵһ*/ ptmp = foundStr + strlen(ifname) + 1; if (NULL == ptmp) { DEBUG_LOG("ptmp is NULL."); return ERROR; } /*ҵһ͵ھֱǽͷ*/ if(ERROR == getVifData(ptmp, 1, Recived) || ERROR == getVifData(ptmp, 2, RecivedPkt) || ERROR == getVifData(ptmp, 4, RecivedPktDrop) || ERROR == getVifData(ptmp, 9, Sent) || ERROR == getVifData(ptmp, 10, SentPkt) || ERROR == getVifData(ptmp, 12, SentPktDrop)) { DEBUG_LOG("getVifData is ERROR."); return ERROR; } (void)snprintf_s(gtNicInfo.info[gtNicInfo.count].tp, 
sizeof(gtNicInfo.info[gtNicInfo.count].tp), sizeof(gtNicInfo.info[gtNicInfo.count].tp), "%s:%s", Sent, Recived); gtNicInfo.info[gtNicInfo.count].tp[strlen(Sent) + strlen(Recived) + 1] = '\0'; (void)snprintf_s(gtNicInfo.info[gtNicInfo.count].packs, sizeof(gtNicInfo.info[gtNicInfo.count].packs), sizeof(gtNicInfo.info[gtNicInfo.count].packs), "%s:%s", SentPkt, RecivedPkt); gtNicInfo.info[gtNicInfo.count].packs[strlen(SentPkt) + strlen(RecivedPkt) + 1] = '\0'; (void)sscanf_s(SentPktDrop,"%ld", >NicInfo.info[gtNicInfo.count].sentdrop); (void)sscanf_s(RecivedPktDrop,"%ld",>NicInfo.info[gtNicInfo.count].recievedrop); return SUCC; } /***************************************************************************** Function : getData Description: õÿϢ Input :pline:ݵһ Output : out ݵֵ Return : *****************************************************************************/ int getVifData(char *pline, int nNumber, char *out) { char *ptr = NULL; int i; if(NULL == pline || NULL == out) { DEBUG_LOG("pline=%p out=%p.", pline, out); return ERROR; } for(i = 1; (0 != *pline) && (i <= nNumber); i++) { /*ȥո*/ while(*pline == ' ') { pline++; } /*Ҹո״γֵλãһеĿͷ*/ ptr = strchr(pline, ' '); if(NULL == ptr) { DEBUG_LOG("ptr is NULL."); return ERROR; } /*ҵѽյ*/ if ((i == nNumber) && (NULL != ptr)) { strncpy_s(out, ptr - pline + 1, pline, ptr - pline); } pline = ptr; } return SUCC; } /***************************************************************************** Function : getFluxinfoLine Description: õļķҪҵ Input :ifname -- the NIC name Output : pline: зbuffer Return : success : fail : *****************************************************************************/ int getFluxinfoLine(char *ifname, char *pline) { char *path = "/proc/net/dev"; FILE *file = NULL; char *begin = NULL; int len = 0; if(NULL == ifname || NULL == pline) { DEBUG_LOG("pline=%p ifname=%p.", pline, ifname); return ERROR; } if(NULL == (file = opendevfile(path))) { DEBUG_LOG("Failed to open /proc/net/dev."); return ERROR; } len = strlen(ifname); while (fgets(pline, 255, file)) { begin = pline; //ȥո while (' ' == *begin) begin++; //ҵƥ˳ if (0 == strncmp(begin, ifname, len)) { if (':' == *(begin + len) ) { break; } } //pline memset_s(pline, 255, 0, 255); } fclose(file); return SUCC; } /***************************************************************************** Function : openNetSocket Description: open the network socket connect Input :None Output : None Return : success : socket handle, fail : ERROR *****************************************************************************/ int openNetSocket(void) { return socket(AF_INET, SOCK_DGRAM, 0); } /***************************************************************************** Function : GetVifMac Description: get the NIC MAC address Input :ifname -- the NIC name Output : None Return : None *****************************************************************************/ void GetVifMac(int skt, const char *ifname) { struct ifreq ifrequest; if (NULL == ifname) { return ; } strncpy_s(ifrequest.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if ( ! 
/*****************************************************************************
 Function   : openNetSocket
 Description: open the network socket connect
 Input      : None
 Output     : None
 Return     : success : socket handle, fail : ERROR
*****************************************************************************/
int openNetSocket(void)
{
    return socket(AF_INET, SOCK_DGRAM, 0);
}

/*****************************************************************************
 Function   : GetVifMac
 Description: get the NIC MAC address
 Input      : ifname -- the NIC name
 Output     : None
 Return     : None
*****************************************************************************/
void GetVifMac(int skt, const char *ifname)
{
    struct ifreq ifrequest;

    if (NULL == ifname)
    {
        return ;
    }

    strncpy_s(ifrequest.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1);
    ifrequest.ifr_name[IFNAMSIZ - 1] = '\0';
    if ( ! (ioctl(skt, SIOCGIFHWADDR, (char *) &ifrequest) ) )
    {
        (void)snprintf_s(gtNicInfo.info[gtNicInfo.count].mac,
                         sizeof(gtNicInfo.info[gtNicInfo.count].mac),
                         sizeof(gtNicInfo.info[gtNicInfo.count].mac),
                         "%02x:%02x:%02x:%02x:%02x:%02x",
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[0],
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[1],
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[2],
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[3],
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[4],
                         (unsigned char)ifrequest.ifr_hwaddr.sa_data[5]);
        (void)snprintf_s(gtNicInfo.info[gtNicInfo.count].ifname,
                         sizeof(gtNicInfo.info[gtNicInfo.count].ifname),
                         sizeof(gtNicInfo.info[gtNicInfo.count].ifname),
                         "%s", ifname);
    }

    return ;
}

/*****************************************************************************
 Function   : GetVifInfo
 Description: collect the information of every NIC of the guest
 Input      : None
 Output     : None
 Return     : the number of NICs found, or ERROR
*****************************************************************************/
int GetVifInfo()
{
    /* interface information used to be queried through ifconf; that code is kept commented out */
    //struct ifconf ifconfigure;
    char *path="/proc/net/dev";
    FILE *file;
    char *line = NULL;
    size_t linelen = 0;
    //char buf[4096];
    int num = 0;
    int skt;

    skt = openNetSocket();
    if (ERROR == skt)
    {
        DEBUG_LOG("Failed to openNetSocket.");
        return ERROR;
    }

    /* initialise ifconf (disabled) */
    //ifconfigure.ifc_len = 4096;
    //ifconfigure.ifc_buf = buf;

    memset_s(&gtNicInfo, sizeof(gtNicInfo), 0, sizeof(gtNicInfo));

    /* control device which name is NIC */
    if(NULL == (file = opendevfile(path)))
    {
        NetworkDestroy(skt);
        DEBUG_LOG("Failed to open /proc/net/dev.");
        return ERROR;
    }

    /* skip the first two lines of /proc/net/dev (the table header) */
    if (getline(&line, &linelen, file) == -1 /* eat line */
        || getline(&line, &linelen, file) == -1)
    {
        DEBUG_LOG("Remove /proc/net/dev head.");
    }

    /* walk the remaining per-interface lines */
    while(getline(&line, &linelen, file) != -1)
    {
        char *namebuf;
        (void)GetVifName(&namebuf, line);

        /* bond devices are skipped here */
        if (NULL != strstr(namebuf, "bond"))
        {
            DEBUG_LOG("Has bond model.");
            continue;
        }

        if (NULL != strstr(namebuf, "eth") || NULL != strstr(namebuf, "Gmn"))
        {
            /*if interface is Gmn br, GmnExFlag = 1*/
            if(NULL != strstr(namebuf, "Gmn"))
            {
                gtNicInfo.info[gtNicInfo.count].GmnExFlag = 1;
            }
            else
            {
                gtNicInfo.info[gtNicInfo.count].GmnExFlag = 0;
            }

            if (UPFLAG == GetVifFlag(skt, namebuf))
            {
                num++;
                GetVifMac(skt, namebuf);
                if (ERROR == GetVifIp(skt, namebuf))
                {
                    gtNicInfo.info[gtNicInfo.count].gateway[0] = '\0';
                    (void)strncpy_s(gtNicInfo.info[gtNicInfo.count].ip, 16, "none", strlen("none"));
                    //end by '\0'
                    gtNicInfo.info[gtNicInfo.count].ip[sizeof(gtNicInfo.info[gtNicInfo.count].ip)-1]='\0';
                }
                if(g_exinfo_flag_value & EXINFO_FLAG_GATEWAY)
                {
                    (void)GetVifGateway(skt, namebuf);
                }
                else
                {
                    (void)strncpy_s(gtNicInfo.info[gtNicInfo.count].gateway, 16, "0", strlen("0"));
                }
                if (ERROR == GetFlux(skt, namebuf))
                {
                    continue;
                }
            }
            else
            {
                GetVifMac(skt, namebuf);
                num++;
            }
        }
        gtNicInfo.count=num;
        if (num >= NIC_MAX)
        {
            DEBUG_LOG("Only support 15 nics ");
            break;
        }
    }
    fclose(file);
    free(line);
    NetworkDestroy(skt);

    return gtNicInfo.count;
}
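/*
 * Sketch of the strings that networkctlmon() below assembles, derived from its
 * snprintf_s format strings (the xenstore key macros VIF_DATA_PATH,
 * VIFEXTRA_DATA_PATH and VIF_DROP_PATH are defined elsewhere in this project):
 *
 *   NIC with an IP address : [<mac>-1-<ip>-<gateway>-<txbytes:rxbytes>-<txpkts:rxpkts>]
 *   NIC without an IP      : [<mac>-0]
 *   packet-loss summary    : "<rx drops>:<tx drops>"
 *
 * The angle brackets around the last two fields of a record are literal. The
 * first VIF_MAX records go to VIF_DATA_PATH, any remainder to VIFEXTRA_DATA_PATH.
 */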
/*****************************************************************************
 Function   : networkctlmon
 Description: network monitoring entry: collect the NIC data and report it
 Input      : handle -- xenstore handle
 Output     : the NIC information is written into xenstore
 Return     : None
*****************************************************************************/
void networkctlmon(void *handle)
{
    char ArrRet[1024] = {0};
    char ArrRet1[1024] = {0};
    char NetworkLoss[32] = {0};
    int num;
    int i;
    int j = 0;
    long sumrecievedrop = 0;
    long sumsentdrop = 0;
    VIF_DATA stVifDataTemp;

    memset_s(&stVifDataTemp, sizeof(stVifDataTemp), 0, sizeof(stVifDataTemp));

    num = GetVifInfo();
    if (ERROR == num)
    {
        write_to_xenstore(handle, VIF_DATA_PATH, "error");
        write_to_xenstore(handle, VIFEXTRA_DATA_PATH, "error");
        DEBUG_LOG("Num is ERROR.");
        return;
    }

    /* no NIC found: write the string "0" into xenstore */
    if (0 == num)
    {
        write_to_xenstore(handle, VIF_DATA_PATH, "0");
        write_to_xenstore(handle, VIFEXTRA_DATA_PATH, "0");
        return;
    }

    /*for Gmn br info: move the Gmn bridge entries to the front of the array*/
    for (i = 0; i < num; i++)
    {
        if(1 == gtNicInfo.info[i].GmnExFlag)
        {
            if(i == j)
            {
                j++;
                continue;
            }
            //temp=i
            (void)memcpy_s(&stVifDataTemp,sizeof(VIF_DATA),&gtNicInfo.info[i],sizeof(VIF_DATA));
            gtNicInfo.info[i].GmnExFlag = 0;
            //i=j
            (void)memcpy_s(&gtNicInfo.info[i],sizeof(VIF_DATA),&gtNicInfo.info[j],sizeof(VIF_DATA));
            gtNicInfo.info[i].GmnExFlag = 0;
            //j=temp
            (void)memcpy_s(&gtNicInfo.info[j],sizeof(VIF_DATA),&stVifDataTemp,sizeof(VIF_DATA));
            gtNicInfo.info[j].GmnExFlag = 0;
            j++;
        }
    }

    for (i = 0; i < num; i++)
    {
        /* accumulate the transmit/receive drop counters */
        if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS)
        {
            sumsentdrop += gtNicInfo.info[i].sentdrop;
            sumrecievedrop += gtNicInfo.info[i].recievedrop;
        }

        if(i < VIF_MAX)
        {
            if (0 != strlen(gtNicInfo.info[i].ip))
            {
                (void)snprintf_s(ArrRet + strlen(ArrRet),
                                 sizeof(ArrRet) - strlen(ArrRet),
                                 sizeof(ArrRet) - strlen(ArrRet),
                                 "[%s-1-%s-%s-<%s>-<%s>]",
                                 gtNicInfo.info[i].mac,
                                 gtNicInfo.info[i].ip,
                                 trim(gtNicInfo.info[i].gateway),
                                 gtNicInfo.info[i].tp,
                                 gtNicInfo.info[i].packs);
            }
            else
            {
                (void)snprintf_s(ArrRet + strlen(ArrRet),
                                 sizeof(ArrRet) - strlen(ArrRet),
                                 sizeof(ArrRet) - strlen(ArrRet),
                                 "[%s-0]",gtNicInfo.info[i].mac);
            }
        }
        else
        {
            if (0 != strlen(gtNicInfo.info[i].ip))
            {
                (void)snprintf_s(ArrRet1 + strlen(ArrRet1),
                                 sizeof(ArrRet1) - strlen(ArrRet1),
                                 sizeof(ArrRet1) - strlen(ArrRet1),
                                 "[%s-1-%s-%s-<%s>-<%s>]",
                                 gtNicInfo.info[i].mac,
                                 gtNicInfo.info[i].ip,
                                 trim(gtNicInfo.info[i].gateway),
                                 gtNicInfo.info[i].tp,
                                 gtNicInfo.info[i].packs );
            }
            else
            {
                (void)snprintf_s(ArrRet1 + strlen(ArrRet1),
                                 sizeof(ArrRet1) - strlen(ArrRet1),
                                 sizeof(ArrRet1) - strlen(ArrRet1),
                                 "[%s-0]",gtNicInfo.info[i].mac);
            }
        }
    }

    if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS)
    {
        (void)snprintf_s(NetworkLoss, sizeof(NetworkLoss), sizeof(NetworkLoss),
                         "%ld:%ld",sumrecievedrop, sumsentdrop);
    }

    if(xb_write_first_flag == 0)
    {
        write_to_xenstore(handle, VIF_DATA_PATH, ArrRet);
        /* with no more than VIF_MAX NICs the extra key only carries the string "0" */
        if (VIF_MAX >= num)
        {
            write_to_xenstore(handle, VIFEXTRA_DATA_PATH, "0");
        }
        /* with more than VIF_MAX NICs the extra key carries the remaining entries */
        else
        {
            write_to_xenstore(handle, VIFEXTRA_DATA_PATH, ArrRet1);
        }
        if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS)
        {
            write_to_xenstore(handle, VIF_DROP_PATH, NetworkLoss);
        }
    }
    else
    {
        write_weak_to_xenstore(handle, VIF_DATA_PATH, ArrRet);
        if (VIF_MAX >= num)
        {
            write_weak_to_xenstore(handle, VIFEXTRA_DATA_PATH, "0");
        }
        else
        {
            write_weak_to_xenstore(handle, VIFEXTRA_DATA_PATH, ArrRet1);
        }
        if(g_exinfo_flag_value & EXINFO_FLAG_NET_LOSS)
        {
            write_weak_to_xenstore(handle, VIF_DROP_PATH, NetworkLoss);
        }
    }

    return;
}

/*****************************************************************************
 Function   : left
 Description: copy the leftmost n characters of a string
 Input      : dst -- destination buffer, src -- source string, n -- number of characters
 Output     : dst
 Return     : dst
*****************************************************************************/
char * left(char *dst,char *src, unsigned int n)
{
    char *p = src;
    char *q = dst;
    unsigned int len = strlen(src);

    if(n>len)
        n = len;
    /*p += (len-n);*/
    /* copy the first n characters of src */
    while(n--)
    {
        *(q++) = *(p++);
    }
    *(q++)='\0';    /* always terminate the destination */

    return dst;
}
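/*
 * Usage sketch (hypothetical values): netbond() further down uses left() and
 * right() to split a MAC string into two halves before comparing NICs, e.g.
 *
 *   char half[MAC_NAME_LENGTH] = {0};
 *   left(half,  "28:6e:d4:88:c6:ab", 8);   // half == "28:6e:d4"
 *   right(half, "28:6e:d4:88:c6:ab", 8);   // half == "88:c6:ab"
 *
 * right() below relies on the destination being zero-filled beforehand because
 * it does not append a terminating '\0' itself.
 */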
/*****************************************************************************
 Function   : right
 Description: copy the rightmost n characters of a string
 Input      : dst -- destination buffer, src -- source string, n -- number of characters
 Output     : dst
 Return     : dst
*****************************************************************************/
/* take the last n characters of src */
char * right(char *dst,char *src, unsigned int n)
{
    char *p = src;
    char *q = dst;
    unsigned int len = strlen(src);

    if(n>len)
        n = len;
    p += (len-n);
    /* start copying at the n-th character counted from the right */
    //while(*(q++) = *(p++));
    while (*p != '\0')
    {
        *(q++) = *(p++);
    }
    /* note: no terminating '\0' is appended here; callers pass zero-filled buffers */

    return dst;
}

/*****************************************************************************
 Function   : getlineinfo
 Description: copy characters from src into dst until the separator c is found
 Input      : dst -- destination buffer, src -- source string,
              n -- maximum number of characters, c -- separator character
 Output     : dst
 Return     : the number of characters copied
*****************************************************************************/
int getlineinfo(char *dst,char *src, unsigned int n,char c)
{
    char *p = src;
    char *q = dst;
    char chr_tem;
    int count = 0;
    unsigned int len = strlen(src);

    if(n>len)
        n = len;
    /*p += (len-n);*/
    while(n--)
    {
        chr_tem = *(p++);
        if(chr_tem == c)
        {
            *(q++)='\0';
            break;
        }
        *(q++) = chr_tem;
        count++;
    }
    *(q++)='\0';    /* always terminate the destination */

    return count;
}

void InitBond()
{
    memset_s(&gtNicInfo_bond, sizeof(gtNicInfo_bond), 0, sizeof(gtNicInfo_bond));
    memset_s(&BoundInfo, sizeof(BoundInfo), 0, sizeof(BoundInfo));
}

/*****************************************************************************
 Function   : getVifInfo_forbond
 Description: collect the name and MAC address of every candidate NIC for bonding
 Input      : None
 Output     : None
 Return     : the number of NICs found, or a negative value on error
*****************************************************************************/
int getVifInfo_forbond()
{
    int skfd;
    FILE *fp;
    char *line = NULL;
    size_t linelen = 0;
    char left_ifname[10] = {0};
    struct ifreq ifr;

    fp = opendevfile("/proc/net/dev");
    if(NULL == fp )
    {
        return -1;
    }

    skfd=socket(AF_INET, SOCK_DGRAM, 0);
    if (ERROR == skfd)
    {
        DEBUG_LOG("Failed to openNetSocket.");
        fclose(fp);
        return ERROR;
    }

    if (getline(&line, &linelen, fp) == -1 /* eat line */
        || getline(&line, &linelen, fp) == -1)
    {
        DEBUG_LOG("Eat two lines.");
    }

    gtNicInfo_bond.count = 0;
    while (getline(&line, &linelen, fp) != -1)
    {
        char *s, *name;
        s = GetVifName(&name, line);
        if(NULL == s)
        {
            INFO_LOG("GetVifName is NULL.");
        }
        INFO_LOG("getVifInfo_forbond vif name is %s.", name);

        if (strcmp(name, "lo") == 0 || strcmp(name, "sit0") == 0)
        {
            continue;
        }

        memset_s(left_ifname,10,0,10);
        left(left_ifname,name,4);
        if(strcmp(left_ifname, "bond") == 0)
        {
            memset_s(left_ifname,10,0,10);
            continue;
        }

        memset_s(left_ifname,10,0,10);
        left(left_ifname,name,6);
        if(strcmp(left_ifname, "virbr0") == 0)
        {
            memset_s(left_ifname,10,0,10);
            continue;
        }

        strncpy_s(ifr.ifr_name, IFNAMSIZ, name, IFNAMSIZ-1);
        ifr.ifr_name[IFNAMSIZ - 1] = '\0';
        if (!(ioctl(skfd, SIOCGIFHWADDR, (char *)&ifr)))
        {
            (void)snprintf_s(gtNicInfo_bond.info[gtNicInfo_bond.count].mac,
                             sizeof(gtNicInfo_bond.info[gtNicInfo_bond.count].mac),
                             sizeof(gtNicInfo_bond.info[gtNicInfo_bond.count].mac),
                             "%02x:%02x:%02x:%02x:%02x:%02x",
                             (unsigned char)ifr.ifr_hwaddr.sa_data[0],
                             (unsigned char)ifr.ifr_hwaddr.sa_data[1],
                             (unsigned char)ifr.ifr_hwaddr.sa_data[2],
                             (unsigned char)ifr.ifr_hwaddr.sa_data[3],
                             (unsigned char)ifr.ifr_hwaddr.sa_data[4],
                             (unsigned char)ifr.ifr_hwaddr.sa_data[5]);
            (void)snprintf_s(gtNicInfo_bond.info[gtNicInfo_bond.count].ifname,
                             sizeof(gtNicInfo_bond.info[gtNicInfo_bond.count].ifname),
                             sizeof(gtNicInfo_bond.info[gtNicInfo_bond.count].ifname),
                             "%s", name);
            gtNicInfo_bond.count++;
        }
        if(gtNicInfo_bond.count >= NIC_MAX)
        {
            INFO_LOG("The monitor only support 15 nics gtNicInfo_bond.count=%d.", gtNicInfo_bond.count);
            break;
        }
    }
    if(line)
    {
        free(line);
    }
    fclose(fp);
    NetworkDestroy(skfd);

    return gtNicInfo_bond.count;
}

/*****************************************************************************
 Function   : getXenVif
 Description: check whether the given interface is a xen paravirtual vif
 Input      : vifname -- the NIC name
 Output     : None
 Return     : 1 if it is a xen vif, 0 otherwise
*****************************************************************************/
int getXenVif(char *vifname)
{
    FILE *pFileVer;
    char vifdriver[1024] = {0};
    char CurrentPath[128] = {0};

    if( NULL == vifname )
    {
        return 0;
} (void)snprintf_s(CurrentPath, 128, 128, "/sys/class/net/%s/device/modalias", vifname); pFileVer = opendevfile(CurrentPath); if (NULL == pFileVer) { return 0; } (void)fgets(vifdriver, 1024, pFileVer); if(strstr((char *)vifdriver, "xen:vif")) { INFO_LOG("This is a nomorl nic name=%s.", vifdriver); fclose(pFileVer); return 1; } fclose(pFileVer); return 0; } /***************************************************************************** Function : getVifNameFromBond Description: bondлȡ,ȻжDzxen vif Input : Output : Return : SUCC OR ERROR *****************************************************************************/ int getVifNameFromBond(char *bondname) { FILE *pFileVer; char vifname[256] = {0}; char CurrentPath[128] = {0}; char arZe[] = " "; char arGet[VIF_NAME_LENGTH]; char *szGet; char *nexttoken1 = NULL; char *nexttoken2 = NULL; struct ifreq ifrequest; INFO_LOG("Enter the getVifNameFromBond."); (void)snprintf_s(CurrentPath, 128, 128, "/sys/class/net/%s/bonding/slaves", bondname); int skt; skt = openNetSocket(); if (ERROR == skt) { return 0; } pFileVer = opendevfile(CurrentPath); if (NULL == pFileVer) { NetworkDestroy(skt); return 0; } (void)memset_s(vifname,256,0,256); (void)fgets(vifname, 256, pFileVer); //fgetȡļ껻зټ \0 vifname[strlen(vifname)-1] = '\0'; INFO_LOG("vifname=%s.", vifname); fclose(pFileVer); (void)memset_s(arGet,VIF_NAME_LENGTH,0,VIF_NAME_LENGTH); for(szGet = strtok_s(vifname, arZe, &nexttoken1); szGet != NULL; szGet = strtok_s(NULL, arZe, &nexttoken2)) { (void)memset_s(arGet,VIF_NAME_LENGTH,0,VIF_NAME_LENGTH); strncpy_s(arGet, VIF_NAME_LENGTH, szGet, strlen(szGet)); szGet = NULL; INFO_LOG("This is a nic name arGet=%s.", arGet); if(getXenVif(arGet)) { (void)memset_s(BoundInfo.info[BoundInfo.count].vifname, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); strncpy_s(BoundInfo.info[BoundInfo.count].vifname, VIF_NAME_LENGTH, arGet, sizeof(arGet)-1); BoundInfo.info[BoundInfo.count].vifname[VIF_NAME_LENGTH -1]='\0'; strncpy_s(ifrequest.ifr_name, IFNAMSIZ, (char *)arGet, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if ( ! (ioctl(skt, SIOCGIFHWADDR, (char *) &ifrequest) ) ) { (void)snprintf_s(BoundInfo.info[BoundInfo.count].mac, sizeof(BoundInfo.info[BoundInfo.count].mac), sizeof(BoundInfo.info[BoundInfo.count].mac), "%02x:%02x:%02x:%02x:%02x:%02x", (unsigned char)ifrequest.ifr_hwaddr.sa_data[0], (unsigned char)ifrequest.ifr_hwaddr.sa_data[1], (unsigned char)ifrequest.ifr_hwaddr.sa_data[2], (unsigned char)ifrequest.ifr_hwaddr.sa_data[3], (unsigned char)ifrequest.ifr_hwaddr.sa_data[4], (unsigned char)ifrequest.ifr_hwaddr.sa_data[5]); } BoundInfo.count++; } else { (void)memset_s(BoundInfo.info[BoundInfo.count].sriov_vifname, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); strncpy_s(BoundInfo.info[BoundInfo.count].sriov_vifname, VIF_NAME_LENGTH, arGet, sizeof(arGet)-1); BoundInfo.info[BoundInfo.count].sriov_vifname[sizeof(arGet)-1]='\0'; strncpy_s(ifrequest.ifr_name, IFNAMSIZ, (char *)arGet, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if ( ! 
(ioctl(skt, SIOCGIFHWADDR, (char *) &ifrequest) ) ) { (void)snprintf_s(BoundInfo.info[BoundInfo.count].sriov_mac, sizeof(BoundInfo.info[BoundInfo.count].sriov_mac), sizeof(BoundInfo.info[BoundInfo.count].sriov_mac), "%02x:%02x:%02x:%02x:%02x:%02x", (unsigned char)ifrequest.ifr_hwaddr.sa_data[0], (unsigned char)ifrequest.ifr_hwaddr.sa_data[1], (unsigned char)ifrequest.ifr_hwaddr.sa_data[2], (unsigned char)ifrequest.ifr_hwaddr.sa_data[3], (unsigned char)ifrequest.ifr_hwaddr.sa_data[4], (unsigned char)ifrequest.ifr_hwaddr.sa_data[5]); } } } INFO_LOG("Exit the getVifNameFromBond IsBondNic."); NetworkDestroy(skt); return BoundInfo.count; } /***************************************************************************** Function : reset_bond_file Description: monitorʱȡѾõbondϢṹ Input : Output : Return : 0 OR 0 *****************************************************************************/ int reset_bond_file() { FILE *fp; char *line = NULL; size_t linelen = 0; char left_ifname[10] = {0}; char bondname_tmp[VIF_NAME_LENGTH] = {0}; int bond_count = 0; INFO_LOG("Enter the reset_bond_file."); BoundInfo.count = 0; fp = opendevfile("/proc/net/dev"); if(NULL == fp) { ERR_LOG("Open file /proc/net/dev failed, errno=%d.", errno); return -1; } if (getline(&line, &linelen, fp) == -1 /* eat line */ || getline(&line, &linelen, fp) == -1) {} while (getline(&line, &linelen, fp) != -1) { char *s, *name; s = GetVifName(&name, line); if(NULL == s) { INFO_LOG("GetVifName is NULL."); } (void)memset_s(left_ifname,10,0,10); left(left_ifname,name,4); if(strcmp(left_ifname, "bond") == 0) { INFO_LOG("This is a bond, not nic name=%s.", name); (void)memset_s(left_ifname,10,0,10); (void)memset_s(bondname_tmp, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); (void)snprintf_s(bondname_tmp, VIF_NAME_LENGTH, VIF_NAME_LENGTH, "%s", name); (void)memset_s(BoundInfo.info[BoundInfo.count].bondname, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); BoundInfo.info[BoundInfo.count].release_count = 0; strncpy_s(BoundInfo.info[BoundInfo.count].bondname, VIF_NAME_LENGTH, bondname_tmp, sizeof(bondname_tmp)-1); BoundInfo.info[BoundInfo.count].bondname[sizeof(bondname_tmp)-1]='\0'; INFO_LOG("BoundInfo.info[%d].bondname=%s.", BoundInfo.count, BoundInfo.info[BoundInfo.count].bondname); if(getVifNameFromBond(name)) { bond_count++; } } } if(line) { free(line); } fclose(fp); INFO_LOG("Exit the reset_bond_file."); return bond_count; } /***************************************************************************** Function : GetBondVifIp Description: ȡdhcp ip,Ȼ󿽱bond Input : Output : Return : NULL or IP *****************************************************************************/ int GetBondVifIp(const char *ifname,char **vifip) { struct ifreq ifrequest; struct sockaddr_in *pAddr; int skt; skt = openNetSocket(); if (ERROR == skt) { return 0; } (void)strncpy_s(ifrequest.ifr_name, IFNAMSIZ, ifname, IFNAMSIZ-1); ifrequest.ifr_name[IFNAMSIZ - 1] = '\0'; if (!(ioctl(skt, SIOCGIFFLAGS, (char *) &ifrequest))) { /* ж״̬ */ if (ifrequest.ifr_flags & IFF_UP) { if ( ! 
(ioctl(skt, SIOCGIFADDR, (char *) &ifrequest) ) ) { pAddr = (struct sockaddr_in *) (&ifrequest.ifr_addr); *vifip = inet_ntoa( (pAddr->sin_addr )); NetworkDestroy(skt); return 1; } } } NetworkDestroy(skt); return 0; } int GetBondVifGateway(const char *ifname,char **gateway) { char pathBuf[MAX_NICINFO_LENGTH] = {0}; char pszGateway[VIF_NAME_LENGTH] = {0}; char pszGatewayBuf[VIF_NAME_LENGTH] = {0}; FILE *iRet; int skt; skt = openNetSocket(); if (ERROR == skt) { return 0; } if(SUCC != CheckName(ifname)) { NetworkDestroy(skt); return ERROR; } (void)memset_s(pathBuf, MAX_NICINFO_LENGTH, 0, MAX_NICINFO_LENGTH); /*ƴʱṩshellͨroute -nȡϢ*/ (void)snprintf_s(pathBuf, MAX_NICINFO_LENGTH, MAX_NICINFO_LENGTH, "route -n | grep -i \"%s$\" | grep 0.0.0.0 | grep UG | awk '{print $2}'", ifname); iRet = openPipe(pathBuf, "r"); if (NULL == iRet) { DEBUG_LOG("Failed to exec route shell command."); *gateway = NULL; NetworkDestroy(skt); return 0; } /*ȡϢ*/ if(NULL != fgets(pszGatewayBuf,sizeof(pszGatewayBuf),iRet)) { (void)sscanf_s(pszGatewayBuf,"%s",pszGateway,sizeof(pszGateway)); } trim(pszGateway); /*ûϢ0*/ if(strlen(pszGateway) < 1) { (void)pclose(iRet); iRet = NULL; NetworkDestroy(skt); *gateway = NULL; return 0; } (void)pclose(iRet); iRet = NULL; (void)strncpy_s(*gateway, VIF_NAME_LENGTH, pszGateway, strlen(pszGateway)); NetworkDestroy(skt); return 1; } /***************************************************************************** Function : netbond Description: ֱͨ󶨣mac ַжЩҪ Input :handle -- xenstore Output : Return : SUCC OR ERROR *****************************************************************************/ int netbond() { int num; int i; int j; int k; int bond_count; char *bondvifip; char *bondvifgateway; int iRet; char left_mac[MAC_NAME_LENGTH] = {0}; char right_mac[MAC_NAME_LENGTH] = {0}; char left_mac_1[MAC_NAME_LENGTH] = {0}; char right_mac_1[MAC_NAME_LENGTH] = {0}; char pszCommand[MAX_COMMAND_LENGTH] = {0}; char pszBuff[MAX_COMMAND_LENGTH] = {0}; num = getVifInfo_forbond(); if (ERROR == num || 0 == num) { ERR_LOG("Number error, num=%d.", num); return 0; } bond_count = reset_bond_file(); BoundInfo.count = bond_count; INFO_LOG("BoundInfo.count=%d.", BoundInfo.count); for (i = 0; i < num; i++) { INFO_LOG("gtNicInfo.info[%d].mac=%s.", i, gtNicInfo_bond.info[i].mac); INFO_LOG("gtNicInfo.info[%d].ifname=%s.", i, gtNicInfo_bond.info[i].ifname); memset_s(left_mac,MAC_NAME_LENGTH,0,MAC_NAME_LENGTH); memset_s(right_mac,MAC_NAME_LENGTH,0,MAC_NAME_LENGTH); left(left_mac,(char *)gtNicInfo_bond.info[i].mac,8); right(right_mac,(char *)gtNicInfo_bond.info[i].mac,8); INFO_LOG("left_mac=%s, right_mac=%s.",left_mac,right_mac); for(j=i+1;j /sys/class/net/bonding_masters", k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); /*down sriov then add sriov to bond*/ (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "echo 1 > /sys/class/net/bond%d/bonding/mode",k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } bondvifip = NULL; if(GetBondVifIp(BoundInfo.info[k].sriov_vifname,&bondvifip)) { (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, 
MAX_COMMAND_LENGTH, "ifconfig bond%d %s", k,bondvifip); INFO_LOG("Call uvpPopen pszCommand=%s bondvifip=%s.", pszCommand, bondvifip); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } } INFO_LOG("bondvifip=%s.", bondvifip); bondvifgateway = NULL; if(GetBondVifGateway(BoundInfo.info[k].sriov_vifname,&bondvifgateway)) { (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "route add default gw %s", bondvifgateway); INFO_LOG("Call uvpPopen pszCommand=%s bondvigateway=%s.", pszCommand, bondvifip); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d ", pszCommand, iRet); } } INFO_LOG("bondvigateway=%s.", bondvifgateway); (void)memset_s(BoundInfo.info[k].bondname, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); (void)snprintf_s(BoundInfo.info[k].bondname, VIF_NAME_LENGTH, VIF_NAME_LENGTH, "bond%d", k); INFO_LOG("BoundInfo.info[%d].bondname=%s.", k, BoundInfo.info[k].bondname); (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s down", BoundInfo.info[k].sriov_vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); /*down sriov then add sriov to bond*/ (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "echo +%s > /sys/class/net/bond%d/bonding/slaves", BoundInfo.info[k].sriov_vifname,k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s down", BoundInfo.info[k].vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s fail ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); /*down vif then add vif to bond*/ (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "echo +%s > /sys/class/net/bond%d/bonding/slaves", BoundInfo.info[k].vifname,k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } INFO_LOG("Call openPipe pszCommand=%s success.", pszCommand); (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); /*turn sriov to primary*/ (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "echo %s > /sys/class/net/bond%d/bonding/primary", BoundInfo.info[k].sriov_vifname,k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, 
MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s up", BoundInfo.info[k].sriov_vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s up", BoundInfo.info[k].vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig bond%d up", k); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d." ,pszCommand, iRet); } //redhat ϵͳɾIPᵼж·ɣ粻ͨ (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s 0", BoundInfo.info[k].vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s 0", BoundInfo.info[k].sriov_vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret = %d.", pszCommand, iRet); } } INFO_LOG("BoundInfo.count=%d.", BoundInfo.count); return 1; } /***************************************************************************** Function : releasenetbond Description: ǨƻʱbondлΪͨ Input : Output : Return : *****************************************************************************/ int releasenetbond(void *handle) { int k; int iRet = 0; char pszCommand[MAX_COMMAND_LENGTH] = {0}; char pszBuff[MAX_COMMAND_LENGTH] = {0}; INFO_LOG("releasenetbond BoundInfo.count=%d.", BoundInfo.count); for(k=0;k /sys/class/net/%s/bonding/primary", BoundInfo.info[k].vifname, BoundInfo.info[k].bondname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); //ִʧ write_to_xenstore(handle, RELEASE_BOND, "-1"); return 0; } BoundInfo.info[k].release_count = 1; } write_to_xenstore(handle, RELEASE_BOND, "2"); return 1; } /***************************************************************************** Function : ReGetSriovNicInfo Description: Input :handle -- xenstore Output : Return : SUCC OR ERROR *****************************************************************************/ int ReGetSriovNicInfo() { int nic_num; int sriov_num; int i,j; char left_mac[MAC_NAME_LENGTH] = {0}; char right_mac[MAC_NAME_LENGTH] = {0}; char left_mac_1[MAC_NAME_LENGTH] = {0}; char right_mac_1[MAC_NAME_LENGTH] = {0}; nic_num = getVifInfo_forbond(); INFO_LOG("ReGetSriovNicInfo num=%d.", nic_num); if (0 == nic_num) { INFO_LOG("getVifInfo_forbond failed."); return -1; } sriov_num = 0; for (i = 0; 
i < BoundInfo.count; i++) { INFO_LOG("gtNicInfo.info[%d].mac=%s.", i, BoundInfo.info[i].mac); INFO_LOG("gtNicInfo.info[%d].ifname=%s.", i, BoundInfo.info[i].vifname); (void)memset_s(BoundInfo.info[i].sriov_mac, MAC_NAME_LENGTH, 0, MAC_NAME_LENGTH); (void)memset_s(BoundInfo.info[i].sriov_vifname, VIF_NAME_LENGTH, 0, VIF_NAME_LENGTH); memset_s(left_mac,MAC_NAME_LENGTH,0,MAC_NAME_LENGTH); memset_s(right_mac,MAC_NAME_LENGTH,0,MAC_NAME_LENGTH); left(left_mac,(char *)BoundInfo.info[i].mac,8); right(right_mac,(char *)BoundInfo.info[i].mac,8); for(j=0;j /sys/class/net/%s/bonding/slaves", BoundInfo.info[j].sriov_vifname,BoundInfo.info[j].bondname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "ifconfig %s up", BoundInfo.info[j].sriov_vifname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } (void)memset_s(pszCommand, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)memset_s(pszBuff, MAX_COMMAND_LENGTH, 0, MAX_COMMAND_LENGTH); (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "echo %s > /sys/class/net/%s/bonding/primary", BoundInfo.info[j].sriov_vifname,BoundInfo.info[j].bondname); iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand=%s failed ret=%d.", pszCommand, iRet); } } write_to_xenstore(handle, REBOND_SRIOV, "2"); (void)netbond(); return 1; } UVP-Tools-2.2.0.316/uvp-monitor/securec/000077500000000000000000000000001314037446600175335ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/securec/include/000077500000000000000000000000001314037446600211565ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/securec/include/securec.h000066400000000000000000000207201314037446600227610ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: securec.h * Decription: * the user of this secure c library should include this header file * in you source code. This header file declare all supported API * prototype of the library, such as memcpy_s, strcpy_s, wcscpy_s, * strcat_s, strncat_s, sprintf_s, scanf_s, and so on. * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #ifndef __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27 #define __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27 /* If you need high performance, enable the WITH_PERFORMANCE_ADDONS macro! */ #define WITH_PERFORMANCE_ADDONS #include "securectype.h" /*lint !e537*/ #include /* If stack size on some embedded platform is limited, you can define the following macro * which will put some variables on heap instead of stack. #define STACK_SIZE_LESS_THAN_1K */ /* for performance consideration, the following macro will call the corresponding API * of libC for memcpy, memmove and memset */ #define CALL_LIBC_COR_API /* codes should run under the macro COMPATIBLE_LINUX_FORMAT in unknow system on default, and strtold. The function strtold is referenced first at ISO9899:1999(C99), and some old compilers can not support these functions. 
Here provides a macro to open these functions: SECUREC_SUPPORT_STRTOLD -- if defined, strtold will be used */ /*define error code*/ #ifndef errno_t typedef int errno_t; #endif /* success */ #define EOK (0) /* invalid parameter */ #ifdef EINVAL #undef EINVAL #endif #define EINVAL (22) #define EINVAL_AND_RESET (22 | 0X80) /* invalid parameter range */ #ifdef ERANGE #undef ERANGE /* to avoid redefinition */ #endif #define ERANGE (34) #define ERANGE_AND_RESET (34 | 0X80) /* A wide-character code has been detected that does not correspond to a * valid character, or a byte sequence does not form a valid wide-character code */ #ifdef EILSEQ #undef EILSEQ #endif #define EILSEQ (42) #ifdef EOVERLAP_AND_RESET #undef EOVERLAP_AND_RESET #endif /*Once the buffer overlap is detected, the dest buffer must be reseted! */ #define EOVERLAP_AND_RESET (54 | 0X80) /*if you need export the function of this library in Win32 dll, use __declspec(dllexport) */ #ifdef __cplusplus extern "C" { #endif /* return SecureC Version */ void getHwSecureCVersion(char* verStr, int bufSize, unsigned short* verNumber); /* wmemcpy */ errno_t wmemcpy_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count); /* memmove */ errno_t memmove_s(void* dest, size_t destMax, const void* src, size_t count); errno_t wmemmove_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count); errno_t wcscpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc); errno_t wcsncpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count); errno_t wcscat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc); errno_t wcsncat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count); /* strtok */ char* strtok_s(char* strToken, const char* strDelimit, char** context); wchar_t* wcstok_s(wchar_t* strToken, const wchar_t* strDelimit, wchar_t** context); /* sprintf */ int sprintf_s(char* strDest, size_t destMax, const char* format, ...) SECUREC_ATTRIBUTE(3,4); int swprintf_s(wchar_t* strDest, size_t destMax, const wchar_t* format, ...); /* vsprintf */ int vsprintf_s(char* strDest, size_t destMax, const char* format, va_list argptr) SECUREC_ATTRIBUTE(3,0); int vswprintf_s(wchar_t* strDest, size_t destMax, const wchar_t* format, va_list argptr); int vsnprintf_s(char* strDest, size_t destMax, size_t count, const char* format, va_list arglist) SECUREC_ATTRIBUTE(4,0); /* snprintf */ int snprintf_s(char* strDest, size_t destMax, size_t count, const char* format, ...) 
SECUREC_ATTRIBUTE(4,5); /* scanf */ int scanf_s(const char* format, ...); int wscanf_s(const wchar_t* format, ...); /* vscanf */ int vscanf_s(const char* format, va_list arglist); int vwscanf_s(const wchar_t* format, va_list arglist); /* fscanf */ int fscanf_s(FILE* stream, const char* format, ...); int fwscanf_s(FILE* stream, const wchar_t* format, ...); /* vfscanf */ int vfscanf_s(FILE* stream, const char* format, va_list arglist); int vfwscanf_s(FILE* stream, const wchar_t* format, va_list arglist); /* sscanf */ int sscanf_s(const char* buffer, const char* format, ...); int swscanf_s(const wchar_t* buffer, const wchar_t* format, ...); /* vsscanf */ int vsscanf_s(const char* buffer, const char* format, va_list argptr); int vswscanf_s(const wchar_t* buffer, const wchar_t* format, va_list arglist); /* gets */ char* gets_s(char* buffer, size_t destMax); /* memset function*/ errno_t memset_s(void* dest, size_t destMax, int c, size_t count); /* memcpy function*/ errno_t memcpy_s(void* dest, size_t destMax, const void* src, size_t count); /* strcpy */ errno_t strcpy_s(char* strDest, size_t destMax, const char* strSrc); /* strncpy */ errno_t strncpy_s(char* strDest, size_t destMax, const char* strSrc, size_t count); /* strcat */ errno_t strcat_s(char* strDest, size_t destMax, const char* strSrc); /* strncat */ errno_t strncat_s(char* strDest, size_t destMax, const char* strSrc, size_t count); errno_t strncpy_error(char* strDest, size_t destMax, const char* strSrc, size_t count); errno_t strcpy_error(char* strDest, size_t destMax, const char* strSrc); #if defined(WITH_PERFORMANCE_ADDONS) /* those functions are used by macro */ errno_t memset_sOptTc(void* dest, size_t destMax, int c, size_t count); errno_t memcpy_sOptTc(void* dest, size_t destMax, const void* src, size_t count); /* strcpy_sp is a macro, NOT a function in performance optimization mode. */ #define strcpy_sp(dest, destMax, src) /*lint -save -e506 -e1055 */ (( __builtin_constant_p((destMax)) && __builtin_constant_p((src))) ? \ STRCPY_SM((dest), (destMax), (src)) : strcpy_s((dest), (destMax), (src)) ) /*lint -restore */ /* strncpy_sp is a macro, NOT a function in performance optimization mode. */ #define strncpy_sp(dest, destMax, src, count) /*lint -save -e506 -e1055 */ ((__builtin_constant_p((count)) && __builtin_constant_p((destMax)) && __builtin_constant_p((src))) ? \ STRNCPY_SM((dest), (destMax), (src), (count)) : strncpy_s((dest), (destMax), (src), (count)) ) /*lint -restore */ /* strcat_sp is a macro, NOT a function in performance optimization mode. */ #define strcat_sp(dest, destMax, src) /*lint -save -e506 -e1055 */ (( __builtin_constant_p((destMax)) && __builtin_constant_p((src))) ? \ STRCAT_SM((dest), (destMax), (src)) : strcat_s((dest), (destMax), (src)) ) /*lint -restore */ /* strncat_sp is a macro, NOT a function in performance optimization mode. */ #define strncat_sp(dest, destMax, src, count) /*lint -save -e506 -e1055 */ ((__builtin_constant_p((count)) && __builtin_constant_p((destMax)) && __builtin_constant_p((src))) ? \ STRNCAT_SM((dest), (destMax), (src), (count)) : strncat_s((dest), (destMax), (src), (count)) ) /*lint -restore */ /* memcpy_sp is a macro, NOT a function in performance optimization mode. */ #define memcpy_sp(dest, destMax, src, count) /*lint -save -e506 -e1055 */ (__builtin_constant_p((count)) ? (MEMCPY_SM((dest), (destMax), (src), (count))) : \ (__builtin_constant_p((destMax)) ? (((size_t)(destMax) > 0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_MEM_MAX_LEN)) ? 
memcpy_sOptTc((dest), (destMax), (src), (count)) : ERANGE ) : memcpy_s((dest), (destMax), (src), (count)))) /*lint -restore */ /* memset_sp is a macro, NOT a function in performance optimization mode. */ #define memset_sp(dest, destMax, c, count) /*lint -save -e506 -e1055 */ (__builtin_constant_p((count)) ? (MEMSET_SM((dest), (destMax), (c), (count))) : \ (__builtin_constant_p((destMax)) ? (((size_t)(destMax) > 0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_MEM_MAX_LEN)) ? memset_sOptTc((dest), (destMax), (c), (count)) : ERANGE ) : memset_s((dest), (destMax), (c), (count)))) /*lint -restore */ #endif #ifdef __cplusplus } #endif /* __cplusplus */ #endif/* __SECUREC_H__5D13A042_DC3F_4ED9_A8D1_882811274C27 */ UVP-Tools-2.2.0.316/uvp-monitor/securec/include/securectype.h000066400000000000000000000244061314037446600236700ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: securectype.h * Decription: * define internal used macro and data type. The marco of SECUREC_ON_64BITS * will be determined in this header file, which is a switch for part * of code. Some macro are used to supress warning by MS compiler. *Note: * user can change the value of SECUREC_STRING_MAX_LEN and SECUREC_MEM_MAX_LEN * macro to meet their special need. * History: ******************************************************************************** */ #ifndef __SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7 #define __SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7 /*Shielding VC symbol redefinition warning*/ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #ifdef __STDC_WANT_SECURE_LIB__ #undef __STDC_WANT_SECURE_LIB__ #endif #define __STDC_WANT_SECURE_LIB__ 0 #ifdef _CRTIMP_ALTERNATIVE #undef _CRTIMP_ALTERNATIVE #endif #define _CRTIMP_ALTERNATIVE //comment microsoft *_s function #endif #include #include #include /* #include this file is used to define some macros, such as INT_MAX and SIZE_MAX */ /*if enable COMPATIBLE_WIN_FORMAT, the output format will be compatible to Windows.*/ #if (defined(_WIN32) || defined(_WIN64) || defined(_MSC_VER)) #define COMPATIBLE_WIN_FORMAT #endif #if defined(COMPATIBLE_WIN_FORMAT) /* in windows platform, can't use optimized function for there is no __builtin_constant_p like function */ /* If need optimized macro, can define this: #define __builtin_constant_p(x) 1 */ #ifdef WITH_PERFORMANCE_ADDONS #undef WITH_PERFORMANCE_ADDONS #endif #endif #if (defined(__VXWORKS__) || defined(__vxworks) || defined(__VXWORKS) || defined(_VXWORKS_PLATFORM_) || defined(SECUREC_VXWORKS_VERSION_5_4)) #if !defined(SECUREC_VXWORKS_PLATFORM) #define SECUREC_VXWORKS_PLATFORM #endif #endif #ifdef SECUREC_VXWORKS_PLATFORM #include #endif /*if enable COMPATIBLE_LINUX_FORMAT, the output format will be compatible to Linux.*/ #if !(defined(COMPATIBLE_WIN_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) #define COMPATIBLE_LINUX_FORMAT #endif #ifdef COMPATIBLE_LINUX_FORMAT #include #endif #if defined(__GNUC__) && !defined(WIN32) #define SECUREC_ATTRIBUTE(x,y) __attribute__((format(printf, (x), (y) ))) #else #define SECUREC_ATTRIBUTE(x,y) #endif #if defined(__GNUC__) && ((__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3 /*above 3.4*/ )) ) long __builtin_expect(long exp, long c); #define LIKELY(x) __builtin_expect(!!(x), 1) #define UNLIKELY(x) __builtin_expect(!!(x), 0) #else #define LIKELY(x) (x) #define UNLIKELY(x) (x) #endif #ifndef TWO_MIN #define TWO_MIN(a, b) ((a) < (b) 
? (a) : (b)) #endif #define WCHAR_SIZE sizeof(wchar_t) /*ref //sourceforge.net/p/predef/wiki/OperatingSystems/ #if !(defined(__hpux) || defined(_AIX) || defined(__VXWORKS__) || defined(__vxworks) ||defined(__ANDROID__) || defined(__WRLINUX__)|| defined(_TYPE_uint8_t)) typedef unsigned char unit8_t; #endif */ typedef signed char INT8T; typedef unsigned char UINT8T; #if defined(COMPATIBLE_WIN_FORMAT) || defined(__ARMCC_VERSION) typedef __int64 INT64T; typedef unsigned __int64 UINT64T; #if defined(__ARMCC_VERSION) typedef int INT32T; typedef unsigned int UINT32T; #else typedef __int32 INT32T; typedef unsigned __int32 UINT32T; #endif #else typedef int INT32T; typedef unsigned int UINT32T; typedef long long INT64T; typedef unsigned long long UINT64T; #endif /* define the max length of the string */ #define SECUREC_STRING_MAX_LEN (0x7fffffffUL) #define SECUREC_WCHAR_STRING_MAX_LEN (SECUREC_STRING_MAX_LEN / WCHAR_SIZE) /* add SECUREC_MEM_MAX_LEN for memcpy and memmove*/ #define SECUREC_MEM_MAX_LEN (0x7fffffffUL) #define SECUREC_WCHAR_MEM_MAX_LEN (SECUREC_MEM_MAX_LEN / WCHAR_SIZE) #if SECUREC_STRING_MAX_LEN > 0x7fffffff #error "max string is 2G, or you may remove this macro" #endif #if (defined(__GNUC__ ) && defined(__SIZEOF_POINTER__ )) #if (__SIZEOF_POINTER__ != 4) && (__SIZEOF_POINTER__ != 8) #error "unsupported system, contact Security Design Technology Department of 2012 Labs" #endif #endif #define IN_REGISTER register #define SECC_MALLOC(x) malloc((size_t)(x)) #define SECC_FREE(x) free((void *)(x)) #if defined(_WIN64) || defined(WIN64) || defined(__LP64__) || defined(_LP64) #define SECUREC_ON_64BITS #endif #if (!defined(SECUREC_ON_64BITS) && defined(__GNUC__ ) && defined(__SIZEOF_POINTER__ )) #if __SIZEOF_POINTER__ == 8 #define SECUREC_ON_64BITS #endif #endif #if defined(__SVR4) || defined(__svr4__) #define __SOLARIS #endif #if (defined(__hpux) || defined(_AIX) || defined(__SOLARIS)) #define __UNIX #endif #if ((!defined(SECUREC_SUPPORT_STRTOLD)) && defined(COMPATIBLE_LINUX_FORMAT)) #if defined(__USE_ISOC99) \ || (defined(_AIX) && defined(_ISOC99_SOURCE)) \ || (defined(__hpux) && defined(__ia64)) \ || (defined(__SOLARIS) && (!defined(_STRICT_STDC) && !defined(__XOPEN_OR_POSIX)) || defined(_STDC_C99) || defined(__EXTENSIONS__)) #define SECUREC_SUPPORT_STRTOLD #endif #endif #if ((defined(SECUREC_WRLINUX_BELOW4) || defined(_WRLINUX_BELOW4_)) && defined(SECUREC_SUPPORT_STRTOLD)) #undef SECUREC_SUPPORT_STRTOLD #endif #if defined(WITH_PERFORMANCE_ADDONS) /* for strncpy_s performance optimization */ #define STRNCPY_SM(dest, destMax, src, count) \ ((NULL != (void*)dest && NULL != (void*)src && (size_t)destMax >0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_STRING_MAX_LEN) && (TWO_MIN(count , strlen(src)) + 1) <= (size_t)destMax ) ? ( (count < strlen(src))? (memcpy(dest, src, count), *((char*)dest + count) = '\0', EOK) :( memcpy(dest, src, strlen(src) + 1), EOK ) ) :(strncpy_error(dest, destMax, src, count)) ) #define STRCPY_SM(dest, destMax, src) \ (( NULL != (void*)dest && NULL != (void*)src && (size_t)destMax >0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_STRING_MAX_LEN) && ( strlen(src) + 1) <= (size_t)destMax )? 
(memcpy(dest, src, strlen(src) + 1), EOK) :( strcpy_error(dest, destMax, src))) /* for strcat_s performance optimization */ #if defined(__GNUC__) #define STRCAT_SM(dest, destMax, src) \ ({ int catRet =EOK;\ if ( NULL != (void*)dest && NULL != (void*)src && (size_t)(destMax) >0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_STRING_MAX_LEN) ) {\ char* pCatTmpDst = (dest);\ size_t catRestSz = (destMax);\ do{\ while(catRestSz > 0 && *pCatTmpDst) {\ ++pCatTmpDst;\ --catRestSz;\ }\ if (catRestSz == 0) {\ catRet = EINVAL;\ break;\ }\ if ( ( strlen(src) + 1) <= catRestSz ) {\ memcpy(pCatTmpDst, (src), strlen(src) + 1);\ catRet = EOK;\ }else{\ catRet = ERANGE;\ }\ }while(0);\ if ( EOK != catRet) catRet = strcat_s((dest), (destMax), (src));\ }else{\ catRet = strcat_s((dest), (destMax), (src));\ }\ catRet;}) #else #define STRCAT_SM(dest, destMax, src) strcat_s(dest, destMax, src) #endif /*for strncat_s performance optimization*/ #if defined(__GNUC__) #define STRNCAT_SM(dest, destMax, src, count) \ ({ int ncatRet = EOK;\ if (NULL != (void*)dest && NULL != (void*)src && (size_t)destMax > 0 && (((UINT64T)(destMax) & (UINT64T)(-2)) < SECUREC_STRING_MAX_LEN) && (((UINT64T)(count) & (UINT64T)(-2)) < SECUREC_STRING_MAX_LEN)) {\ char* pCatTmpDest = (dest);\ size_t ncatRestSz = (destMax);\ do{\ while(ncatRestSz > 0 && *pCatTmpDest) {\ ++pCatTmpDest;\ --ncatRestSz;\ }\ if (ncatRestSz == 0) {\ ncatRet = EINVAL;\ break;\ }\ if ( (TWO_MIN((count) , strlen(src)) + 1) <= ncatRestSz ) {\ if ((count) < strlen(src)) {\ memcpy(pCatTmpDest, (src), (count));\ *(pCatTmpDest + (count)) = '\0';\ }else {\ memcpy(pCatTmpDest, (src), strlen(src) + 1);\ }\ }else{\ ncatRet = ERANGE;\ }\ }while(0);\ if ( EOK != ncatRet) ncatRet = strncat_s((dest), (destMax), (src), (count));\ }else{\ ncatRet = strncat_s((dest), (destMax), (src), (count));\ }\ ncatRet;}) #else #define STRNCAT_SM(dest, destMax, src, count) strncat_s(dest, destMax, src, count) #endif /* MEMCPY_SM do NOT check buffer overlap by default, or you can add this check to improve security condCheck = condCheck || (dest == src) || (dest > src && dest < (void*)((UINT8T*)src + count));\ condCheck = condCheck || (src > dest && src < (void*)((UINT8T*)dest + count)); \ */ #define MEMCPY_SM(dest, destMax, src, count)\ (!(((size_t)destMax== 0 )||(((UINT64T)(destMax) & (UINT64T)(-2)) > SECUREC_MEM_MAX_LEN)||((size_t)count > (size_t)destMax) || (NULL == (void*)dest) || (NULL == (void*)src))? (memcpy(dest, src, count), EOK) : (memcpy_s(dest, destMax, src, count))) #define MEMSET_SM(dest, destMax, c, count)\ (!(((size_t)destMax == 0 ) || (((UINT64T)(destMax) & (UINT64T)(-2)) > SECUREC_MEM_MAX_LEN) || (NULL == (void*)dest) || ((size_t)count > (size_t)destMax)) ? 
(memset(dest, c, count), EOK) : ( memset_s(dest, destMax, c, count))) #endif /* WITH_PERFORMANCE_ADDONS */ /* 20150105 For software and hardware decoupling,such as UMG*/ #ifdef SECUREC_SYSAPI4VXWORKS #ifdef feof #undef feof #endif extern int feof(FILE *stream); #ifndef isspace #define isspace(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\r') || ((c) == '\n')) #endif #ifndef isascii #define isascii(c) (((unsigned char)(c))<=0x7f) #endif #ifndef isupper #define isupper(c) ((c) >= 'A' && (c) <= 'Z') #endif #ifndef islower #define islower(c) ((c) >= 'a' && (c) <= 'z') #endif #ifndef isalpha #define isalpha(c) (isupper(c) || (islower(c))) #endif #ifndef isdigit #define isdigit(c) ((c) >= '0' && (c) <= '9') #endif #ifndef isxdigit #define isxupper(c) ((c) >= 'A' && (c) <= 'F') #define isxlower(c) ((c) >= 'a' && (c) <= 'f') #define isxdigit(c) (isdigit(c) || isxupper(c) ||isxlower(c)) #endif #endif #endif /*__SECURECTYPE_H__A7BBB686_AADA_451B_B9F9_44DACDAE18A7*/ UVP-Tools-2.2.0.316/uvp-monitor/securec/src/000077500000000000000000000000001314037446600203225ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-monitor/securec/src/Makefile000066400000000000000000000020111314037446600217540ustar00rootroot00000000000000PROJECT=libsecurec.so #if you need a debug version library, use "-g" instead of "-s -DNDEBUG -O2". # If you compiler report a warning on "break strict-aliasing rules", there is no problem. If you need to clear all warnings, you can add "-fno-strict-aliasing" option to your compiler, but this will impact the performance a little. CFLAG= -I ../include -Wall -s -DNDEBUG -O2 CXXFLAG= $(CFLAG) CC=gcc GCC=gcc ARCH:=$(shell getconf LONG_BIT) ifeq ($(ARCH), 64) CFLAG += -fPIC CXXFLAG += -fPIC endif SOURCES=$(wildcard *.c) OBJECTS=$(patsubst %.c,%.o,$(SOURCES)) $(PROJECT):$(OBJECTS) @mkdir -p ../obj mkdir -p ../lib $(GCC) -shared -o ../lib/$@ $(patsubst %.o,../obj/%.o,$^) $(CFLAG) ar crv libsecurec.a $(patsubst %.o,../obj/%.o,$^) ranlib libsecurec.a #cp ../lib/libsecurec.so /usr/local/lib/libsecurec.so #you may add you custom commands here @echo "finish $(PROJECT)" .c.o: @mkdir -p ../obj $(GCC) -c $< $(CFLAG) -o ../obj/$(patsubst %.c,%.o,$<) clean: rm -rf *.o ../obj ../lib $(PROJECT) libsecurec.a UVP-Tools-2.2.0.316/uvp-monitor/securec/src/Makefile.Linux64000066400000000000000000000011261314037446600232320ustar00rootroot00000000000000PROJECT=libsecurec.so CFLAG= -I ../include -Wall -m64 -s -DNDEBUG -O2 CXXFLAG= $(CFLAG) CC=gcc GCC=gcc ARCH:=$(shell getconf LONG_BIT) ifeq ($(ARCH), 64) CFLAG += -fPIC CXXFLAG += -fPIC endif SOURCES=$(wildcard *.c) OBJECTS=$(patsubst %.c,%.o,$(SOURCES)) $(PROJECT):$(OBJECTS) @mkdir -p ../obj mkdir -p ../lib $(GCC) -shared -o ../lib/$@ $(patsubst %.o,../obj/%.o,$^) $(CFLAG) cp ../lib/libsecurec.so /usr/local/lib/libsecurec.so echo "finish $(PROJECT)" .c.o: @mkdir -p ../obj $(GCC) -c $< $(CFLAG) -o ../obj/$(patsubst %.c,%.o,$<) clean: rm -rf *.o ../obj ../lib $(PROJECT)UVP-Tools-2.2.0.316/uvp-monitor/securec/src/MakefileCustom000066400000000000000000000105361314037446600231620ustar00rootroot00000000000000 ANSI_OBJS = fscanf_s.o gets_s.o memcpy_s.o memmove_s.o memset_s.o scanf_s.o securecutil.o secureinput_a.o secureprintoutput_a.o snprintf_s.o sprintf_s.o sscanf_s.o strcat_s.o strcpy_s.o strncat_s.o strncpy_s.o strtok_s.o vfscanf_s.o vscanf_s.o vsnprintf_s.o vsprintf_s.o vsscanf_s.o UNICODE_OBJS = secureinput_w.o vswscanf_s.o vwscanf_s.o fwscanf_s.o swprintf_s.o swscanf_s.o vfwscanf_s.o vswprintf_s.o wcscat_s.o wcscpy_s.o wcsncat_s.o wcsncpy_s.o wcstok_s.o 
wmemcpy_s.o wmemmove_s.o wscanf_s.o secureprintoutput_w.o ALL_OBJS = $(ANSI_OBJS) $(UNICODE_OBJS) TESTOBJS = $(ALL_OBJS) test.o CC = gcc FLAGS = -I ../include -Wall -DNDEBUG -s -O2 -fno-strict-aliasing ARCH:=$(shell getconf LONG_BIT) ifeq ($(ARCH), 64) FLAGS += -fPIC endif securecshare: $(ALL_OBJS) $(CC) $(FLAGS) -shared -o libsecurec.so $(ALL_OBJS) ansisecurecshare: $(ANSI_OBJS) $(CC) $(FLAGS) -shared -o libsecurec.so $(ANSI_OBJS) securecstatic: $(ALL_OBJS) ar crv libsecurec.a $(ALL_OBJS) ranlib libsecurec.a ansisecurecstatic: $(ANSI_OBJS) ar crv libsecurec.a $(ANSI_OBJS) ranlib libsecurec.a secureinput_a.o : secureinput_a.c secinput.h input.inl $(CC) $(FLAGS) -c secureinput_a.c secureinput_w.o : secureinput_w.c secinput.h input.inl $(CC) $(FLAGS) -c secureinput_w.c fscanf_s.o : fscanf_s.c securecutil.h $(CC) $(FLAGS) -c fscanf_s.c fwscanf_s.o : fwscanf_s.c securecutil.h $(CC) $(FLAGS) -c fwscanf_s.c gets_s.o : gets_s.c securecutil.h $(CC) $(FLAGS) -c gets_s.c memcpy_s.o : memcpy_s.c securecutil.h $(CC) $(FLAGS) -c memcpy_s.c memmove_s.o : memmove_s.c securecutil.h $(CC) $(FLAGS) -c memmove_s.c memset_s.o : memset_s.c securecutil.h $(CC) $(FLAGS) -c memset_s.c scanf_s.o : scanf_s.c securecutil.h $(CC) $(FLAGS) -c scanf_s.c securecutil.o : securecutil.c securecutil.h secureprintoutput.h $(CC) $(FLAGS) -c securecutil.c secureprintoutput_a.o : secureprintoutput_a.c securecutil.h secureprintoutput.h output.inl $(CC) $(FLAGS) -c secureprintoutput_a.c secureprintoutput_w.o : secureprintoutput_w.c securecutil.h secureprintoutput.h output.inl $(CC) $(FLAGS) -c secureprintoutput_w.c snprintf_s.o : snprintf_s.c securecutil.h $(CC) $(FLAGS) -c snprintf_s.c sprintf_s.o : sprintf_s.c securecutil.h $(CC) $(FLAGS) -c sprintf_s.c sscanf_s.o : sscanf_s.c securecutil.h $(CC) $(FLAGS) -c sscanf_s.c strcat_s.o : strcat_s.c securecutil.h $(CC) $(FLAGS) -c strcat_s.c strcpy_s.o : strcpy_s.c securecutil.h $(CC) $(FLAGS) -c strcpy_s.c strncat_s.o : strncat_s.c securecutil.h $(CC) $(FLAGS) -c strncat_s.c strncpy_s.o : strncpy_s.c securecutil.h $(CC) $(FLAGS) -c strncpy_s.c strtok_s.o : strtok_s.c securecutil.h $(CC) $(FLAGS) -c strtok_s.c swprintf_s.o : swprintf_s.c securecutil.h $(CC) $(FLAGS) -c swprintf_s.c swscanf_s.o : swscanf_s.c securecutil.h $(CC) $(FLAGS) -c swscanf_s.c vfscanf_s.o : vfscanf_s.c securecutil.h input.inl $(CC) $(FLAGS) -c vfscanf_s.c vfwscanf_s.o : vfwscanf_s.c securecutil.h input.inl $(CC) $(FLAGS) -c vfwscanf_s.c vscanf_s.o : vscanf_s.c securecutil.h input.inl $(CC) $(FLAGS) -c vscanf_s.c vsnprintf_s.o : vsnprintf_s.c securecutil.h $(CC) $(FLAGS) -c vsnprintf_s.c vsprintf_s.o : vsprintf_s.c securecutil.h $(CC) $(FLAGS) -c vsprintf_s.c vsscanf_s.o : vsscanf_s.c securecutil.h input.inl $(CC) $(FLAGS) -c vsscanf_s.c vswprintf_s.o : vswprintf_s.c securecutil.h $(CC) $(FLAGS) -c vswprintf_s.c vswscanf_s.o : vswscanf_s.c securecutil.h input.inl $(CC) $(FLAGS) -c vswscanf_s.c vwscanf_s.o : vwscanf_s.c securecutil.h $(CC) $(FLAGS) -c vwscanf_s.c wcscat_s.o : wcscat_s.c securecutil.h $(CC) $(FLAGS) -c wcscat_s.c wcscpy_s.o : wcscpy_s.c securecutil.h $(CC) $(FLAGS) -c wcscpy_s.c wcsncat_s.o : wcsncat_s.c securecutil.h $(CC) $(FLAGS) -c wcsncat_s.c wcsncpy_s.o : wcsncpy_s.c securecutil.h $(CC) $(FLAGS) -c wcsncpy_s.c wcstok_s.o : wcstok_s.c securecutil.h $(CC) $(FLAGS) -c wcstok_s.c wmemcpy_s.o : wmemcpy_s.c securecutil.h $(CC) $(FLAGS) -c wmemcpy_s.c wmemmove_s.o : wmemmove_s.c securecutil.h $(CC) $(FLAGS) -c wmemmove_s.c wscanf_s.o : wscanf_s.c securecutil.h $(CC) $(FLAGS) -c wscanf_s.c maketest: 
$(TESTOBJS) $(CC) $(FLAGS) -o tst $(TESTOBJS) test.o : test.c $(CC) $(FLAGS) -c test.c staticlink: $(TESTOBJS) $(CC) $(FLAGS) -o tst test.c -L. -lsecurec.a dynLink: gcc -g -o tst test.c -L. -l/home/l00254400/sec -lsecurec gcc -g -o tst test.c -L. -lsecurec gcc -g -o tst test.c -L. -l./securec clean: rm *.o UVP-Tools-2.2.0.316/uvp-monitor/securec/src/fscanf_s.c000066400000000000000000000037651314037446600222630ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: fscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * fscanf_s * * * int fscanf_s(FILE *stream, const char *format, ...) * * * The fscanf function reads data from the current position of stream into * the locations given by argument (if any). Each argument must be a pointer * to a variable of a type that corresponds to a type specifier in format. * format controls the interpretation of the input fields and has the same * form and function as the format argument for scanf; see scanf for a * description of format. * * * stream Pointer to FILE structure. * format Format control string, see Format Specifications. * ... Optional arguments. * * * ... The convered value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * If an error occurs, or if the end of the file stream is reached before the * first conversion, the return value is EOF for fscanf and fwscanf. ******************************************************************************* */ int fscanf_s(FILE* stream, const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vfscanf_s(stream, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/fwscanf_s.c000066400000000000000000000040311314037446600224350ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: fwscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * fwscanf_s * * * int fwscanf_s (FILE* stream, const wchar_t* format, ...) * * * The fwscanf_s function reads data from the current position of stream into * the locations given by argument (if any). Each argument must be a pointer * to a variable of a type that corresponds to a type specifier in format. * format controls the interpretation of the input fields and has the same * form and function as the format argument for scanf; see scanf for a * description of format. * * * stream Pointer to FILE structure. * format Format control string, see Format Specifications. * ... Optional arguments. * * * ... 
The converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * If an error occurs, or if the end of the file stream is reached before the * first conversion, the return value is EOF for fscanf and fwscanf. ******************************************************************************* */ int fwscanf_s(FILE* stream, const wchar_t* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vfwscanf_s( stream, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/gets_s.c000066400000000000000000000054571314037446600217650ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: gets_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * gets_s * * * char gets_s(char* buffer, size_t numberOfElements); * * * The gets_s function reads a line from the standard input stream stdin and * stores it in buffer. The line consists of all characters up to and including * the first newline character ('\n'). gets_s then replaces the newline * character with a null character ('\0') before returning the line. * If the first character read is the end-of-file character, a null character * is stored at the beginning of buffer and NULL is returned. * * * buffer Storage location for input string. * numberOfElements The size of the buffer. * * * buffer is updated * * * buffer Successful operation * NULL Buffer == NULL, numberOfElements == 0, * numberOfElements > SECUREC_STRING_MAX_LEN, * or read fail ******************************************************************************* */ char* gets_s(char* buffer, size_t numberOfElements) { int len = 0; int i = 1; #ifdef COMPATIBLE_WIN_FORMAT if (buffer == NULL || numberOfElements == 0 || (numberOfElements > SECUREC_STRING_MAX_LEN && numberOfElements != (size_t)-1)) #else if (buffer == NULL || numberOfElements == 0 || numberOfElements > SECUREC_STRING_MAX_LEN) #endif { SECUREC_ERROR_INVALID_PARAMTER("gets_s"); return NULL; } #ifdef COMPATIBLE_WIN_FORMAT if (fgets(buffer, (int)((numberOfElements == (size_t)-1)?SECUREC_STRING_MAX_LEN:numberOfElements), stdin) == NULL ) #else if (fgets(buffer, (int)numberOfElements, stdin) == NULL ) #endif { return NULL; } else { len = (int)strlen(buffer); if (len > 0) { if (buffer[len - 1] == '\r' || buffer[len - 1] == '\n') { /* remove ending carriage return and linefeed */ while (i <= len && (buffer[len - i] == '\r' || buffer[len - i] == '\n') ) { ++i; } buffer[(len - i) + 1] = '\0'; } } return buffer; } } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/input.inl000066400000000000000000002610601314037446600221720ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: input.inl * Description: * used by secureinput_a.c and secureinput_w.c to include. This file * provides a template function for ANSI and UNICODE compiling by * different type definition. 
The functions of securec_input_s or * securec_winput_s provides internal implementation for scanf family * API, such as sscanf_s, fscanf_s. * History: * 1. Date: 2014/4/11 * Author: LiShunda * Modification: add "#ifdef va_copy ... #endif" wraper to variable "arglistBeenCopied", which make the meaning * of this variable more clean and can save a a variable space if va_copy macro NOT exist. * 2. Date: 2014/4/11 * Author: LiShunda * Modification: the code "if (!suppress)" at Line:1435 missing brace, which can cause logic error. Add "{ }" to * wrap the code. * 3. Date: 2014/4/22 * Author: LiShunda * Modification: Line851, change "memcpy(pointer, tmpbuf, temp)" to * memcpy_s(pointer, arrayWidth, tmpbuf, temp), which is more secure. * 4. Date: 2014/6/10 * Author: LiShunda * Modification: add ANDROID macro to detect locale.h exist, if locale.h not exist, * change (localeconv()->decimal_point) to be "." by macro DECIMAL_POINT_PTR. * 5. Date: 2014/6/13 * Author: LiShunda * Modification: remove redundant else which has empty body in line 946, 1212, 1420. * 6. Date: 2014/6/13 * Author: LiShunda * Modification: add judgement on variable dec_point in function _safecrt_fassign to avoid * warning on NOT used variable. * 7. Date: 2014/6/13 * Author: LiShunda * Modification: change the value of MB_LEN_MAX from 5 to 16. ******************************************************************************** */ #ifndef __INPUT_INL__5D13A042_DC3F_4ED9_A8D1_882811274C27 #define __INPUT_INL__5D13A042_DC3F_4ED9_A8D1_882811274C27 #ifndef _INTEGRAL_MAX_BITS #define _INTEGRAL_MAX_BITS 64 #endif /* _INTEGRAL_MAX_BITS */ #include #if !defined(SECUREC_SYSAPI4VXWORKS) #include #endif #include #include #include #define SECUREC_BUF_EXT_MUL (2) #define BUFFERED_BLOK_SIZE 1024 #if defined(ANDROID) || defined(SECUREC_SYSAPI4VXWORKS) #define DECIMAL_POINT_PTR "." #else #include /*if this file NOT exist, you can remove it*/ #define DECIMAL_POINT_PTR (localeconv()->decimal_point) #endif #if defined(SECUREC_VXWORKS_PLATFORM) && !defined(va_copy) && !defined(__va_copy) #define __va_copy(d,s) {\ size_t size_of_d = (size_t)sizeof(d);\ size_t size_of_s = (size_t)sizeof(s);\ if(size_of_d != size_of_s){\ (void)memcpy(d,s, sizeof(va_list));\ }else{\ (void)memcpy(&d, &s, sizeof(va_list));\ }\ } #endif #if (defined(_MSC_VER)) && (_MSC_VER >= 1400) #define MASK_MSVC_SECURE_CRT_WARNING __pragma(warning(push)) \ __pragma(warning(disable:4996)) #define END_MASK_MSVC_SECURE_CRT_WARNING __pragma(warning(pop)) #else #define MASK_MSVC_SECURE_CRT_WARNING #define END_MASK_MSVC_SECURE_CRT_WARNING #endif #define MUL10(x) ((((x) << 2) + (x)) << 1) #define MULTI_BYTE_MAX_LEN (6) #if !defined(UNALIGNED) #if defined(_M_IA64) || defined(_M_AMD64) #define UNALIGNED __unaligned #else #define UNALIGNED #endif #endif #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) #define MAX_64BITS_VALUE 18446744073709551615ULL #define MAX_64BITS_VALUE_DIV_TEN 1844674407370955161ULL #define MAX_64BITS_VALUE_CUT_LAST_DIGIT 18446744073709551610ULL #define MIN_64BITS_NEG_VALUE 9223372036854775808ULL #define MAX_64BITS_POS_VALUE 9223372036854775807ULL #define MIN_32BITS_NEG_VALUE 2147483648ULL #define MAX_32BITS_POS_VALUE 2147483647ULL #define MAX_32BITS_VALUE 4294967295ULL #define MAX_32BITS_VALUE_INC 4294967296ULL #define MAX_32BITS_VALUE_DIV_TEN 429496729ULL #endif #define _T(x) (x) #ifdef _UNICODE #define _TEOF WEOF #ifdef MB_LEN_MAX #undef MB_LEN_MAX #endif #define MB_LEN_MAX 16 /* max. 
# bytes in multibyte char */ #else /*None unicode*/ #define _TEOF EOF #endif #if defined (UNICODE) #define ALLOC_TABLE 1 #else /* defined (UNICODE) */ #define ALLOC_TABLE 0 #endif /* defined (UNICODE) */ #define LEFT_BRACKET ('[' | ('a' - 'A')) /* 'lowercase' version */ static _TINT _hextodec(_TCHAR);/*lint !e10*/ #define INC() (++charCount, _inc(stream)) #define UN_INC(chr) (--charCount, _un_inc(chr, stream)) #define EAT_WHITE() _whiteout(&charCount, stream) static _TINT _inc(SEC_FILE_STREAM*);/*lint !e31 !e10*/ static void _un_inc(_TINT, SEC_FILE_STREAM*);/*lint !e528 !e110 !e10*/ static _TINT _whiteout(int*, SEC_FILE_STREAM*);/*lint !e31 !e10*/ #if defined(_IS_DIGIT) #undef _IS_DIGIT #endif #if defined(_IS_XDIGIT) #undef _IS_XDIGIT #endif #if defined(UNICODE) || defined(_UNICODE) #define _IS_DIGIT(chr) (!((chr) & 0xff00) && isdigit( ((chr) & 0x00ff) )) #define _IS_XDIGIT(chr) (!((chr) & 0xff00) && isxdigit( ((chr) & 0x00ff))) #else /* ANSI */ #define _IS_DIGIT(chr) isdigit((UINT8T)(chr)) #define _IS_XDIGIT(chr) isxdigit((UINT8T)(chr)) #endif /* _UNICODE */ /* 1 means long long is same as int64, * 0 means long long is same as long */ #define LONGLONG_IS_INT64 1 static void _safecrt_fassign(int flag, char* argument, char* number, int dec_point) { char* endptr; double d; #if defined(SECUREC_SUPPORT_STRTOLD) long double d2; #endif if (dec_point != '.') { return; /*don't support multi-language decimal point*/ } if ((NULL != argument) && (NULL != number)) { #if defined(SECUREC_SUPPORT_STRTOLD) if(flag == 2) { d2 = strtold(number, &endptr); *(long double UNALIGNED*)argument = (long double)d2; /*lint !e826*/ return; } #endif d = strtod(number, &endptr); if (flag > 0) { *(double UNALIGNED*)argument = (double)d; /*lint !e826*/ } else { *(float UNALIGNED*)argument = (float)d; /*lint !e826*/ } } } /****************************************************************************** * int __check_float_string(size_t,size_t *, _TCHAR**, _TCHAR*, int*) * * Purpose: * Check if there is enough space insert one more character in the given * block, if not then allocate more memory. * * Return: * 0 if more memory needed and the reallocation failed. 
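 *
 * (Worked note added for clarity; it is not part of the original source.
 * The float text is first collected into a stack array of _CVTBUFSIZE + 1,
 * i.e. 350, characters.  Each time that buffer fills up, this helper
 * allocates a block twice as large, 350 -> 700 -> 1400 ... characters,
 * copies the collected text across with memcpy_s, zeroes and frees the
 * previous heap block if one was already in use, and returns 0 only when
 * the allocation fails or the next size would exceed SECUREC_MEM_MAX_LEN.)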
* ******************************************************************************* */ static int __check_float_string(size_t nFloatStrUsed, size_t* pnFloatStrSz, _TCHAR** pFloatStr,/*lint !e40 !e601 */ _TCHAR* floatstring,/*lint !e40 !e601 */ int* pmalloc_FloatStrFlag) { void* tmpPointer; if (nFloatStrUsed == (*pnFloatStrSz)) { if ((*pFloatStr) == floatstring) { size_t oriBufSize = (*pnFloatStrSz) * SECUREC_BUF_EXT_MUL * sizeof(_TCHAR); if (NULL == ((*pFloatStr ) = (_TCHAR*)SECC_MALLOC(oriBufSize)))/*lint !e586 */ { return 0; } (void)memset_s((*pFloatStr), oriBufSize, 0, oriBufSize); (*pmalloc_FloatStrFlag) = 1; (void)memcpy_s((*pFloatStr), oriBufSize, floatstring, (*pnFloatStrSz) * sizeof(_TCHAR));/*lint !e40 */ (*pnFloatStrSz) *= SECUREC_BUF_EXT_MUL; } else { /* LSD 2014.3.6 fix, replace realloc to malloc to avoid heap injection */ size_t oriBufSize = (*pnFloatStrSz) * sizeof(_TCHAR);/*lint !e40 */ size_t nextSize = oriBufSize * SECUREC_BUF_EXT_MUL; if (nextSize > SECUREC_MEM_MAX_LEN)/*lint !e40 !e574 */ { return 0; } tmpPointer = (void*)SECC_MALLOC(nextSize);/*lint !e586 */ if (tmpPointer == NULL) { return 0; } (void)memcpy_s(tmpPointer, nextSize, (*pFloatStr), oriBufSize ); (void)memset_s((*pFloatStr), oriBufSize, 0 , oriBufSize);/*lint !e1055*/ SECC_FREE( (*pFloatStr) );/*lint !e586 */ (*pFloatStr) = (_TCHAR*)(tmpPointer);/*lint !e40 !e26 !e10 */ (*pnFloatStrSz) *= SECUREC_BUF_EXT_MUL; } } return 1; } #ifndef _UNICODE /*LSD only multi-bytes string need isleadbyte() function*/ #if !(defined(_WIN32) || defined(_INC_WCTYPE)) /*#define isleadbyte(c) ( (c) & 0x80 )*/ static int isleadbyte(UINT8T c) { /*lint !e401*/ return (c & 0x80 ); } #endif #endif #define ASCII (32) /* # of bytes needed to hold 256 bits */ #ifndef _CVTBUFSIZE #define _CVTBUFSIZE (309+40) /* # of digits in max. 
dp value + slop */ #endif #ifndef _UNICODE #define TABLESIZE ASCII #else /* _UNICODE */ #define TABLESIZE (ASCII * 256) #endif /* _UNICODE */ /******************************************************************************* * int securec_input_s(stream, format, arglist) * Entry: * SEC_FILE_STREAM *stream - struct pointer which contains needed info to read from * char *format - format string controlling the data to read * arglist - list of pointer to data items * * Exit: * returns number of items assigned and fills in data items ******************************************************************************** */ #ifdef _UNICODE #define _tfgetc fgetwc #define _tCharMask 0xffff #else #define _tfgetc fgetc #define _tCharMask 0xff #endif /* LSD 2014 1 24 add to protect NULL pointer access */ #define CHECK_INPUT_ADDR(p) if (!p) { paraIsNull = 1; goto error_return; } #ifdef _UNICODE void clearwDestBuf (const wchar_t* buffer,const wchar_t* cformat, va_list arglist) #else void clearDestBuf(const char* buffer,const char* cformat, va_list arglist) #endif { const _TUCHAR* fmt = ( _TUCHAR*)cformat; void *pDestBuf = NULL; va_list arglistsave; /*lint !e530*/ /* backup for arglist value, this variable don't need initialized */ size_t bufSize = 0; int spec = 0; signed char wideChar = 0; char doneFlag = 0; if (fmt != NULL)/*lint !e944*/ { while (*fmt) /*lint !e944*/ { if (_T('%') == *fmt) { doneFlag = 0; wideChar = 0; while (!doneFlag) { spec = *(++fmt); /* coverity[returned_null] */ if (_IS_DIGIT((_TUCHAR)spec)) /*lint !e48 !e409*/ { continue; } else if(_T('h') == spec) { --wideChar; continue; } else if(_T('l') == spec || _T('w') == spec) { ++wideChar; continue; } ++doneFlag; } /*if no l or h flag */ if (!wideChar) { if ((*fmt == _T('S')) || (*fmt == _T('C'))) #if defined(_UNICODE) && (defined(COMPATIBLE_WIN_FORMAT)) { --wideChar; } else { ++wideChar; } #else /* _UNICODE */ { ++wideChar; } else { --wideChar; } #endif /* _UNICODE */ } spec = *fmt | (_T('a') - _T('A')); if (!(spec == _T('c') || spec == _T('s') || spec == LEFT_BRACKET)) { return;/*first argument is not a string type*/ } if ((buffer !=NULL) && (*buffer != _T('\0')) && (spec != _T('s'))) { /*when buffer not empty just clear %s. 
example sscanf(" \n","%s",str,sizeof(str)) */ return; } if( LEFT_BRACKET == spec) { #if !(defined(COMPATIBLE_WIN_FORMAT)) if (_T('{') == *fmt) { return; /*lint !e801*/ } #endif ++fmt; if (_T('^') == *fmt) { ++fmt; } if (_T(']') == *fmt) { ++fmt; } /*2014 3.14 LSD add _T('\0') != *scanptr*/ while ((_T('\0') != *fmt) && (_T(']') != *fmt)) { ++fmt; } if (!*fmt) { return; /* trunc'd format string */ /*lint !e801*/ } } #if defined(va_copy) va_copy(arglistsave, arglist); /*lint !e530*/ #elif defined(__va_copy) /*for vxworks*/ __va_copy(arglistsave, arglist); #else arglistsave = arglist; #endif pDestBuf = (void*)va_arg(arglistsave, void*); /*lint !e826 !e64 !e10*/ /* Get the next argument - size of the array in characters */ bufSize = ((size_t)(va_arg(arglistsave, size_t))) & 0xFFFFFFFFUL; ; /*lint !e826 !e48 !e516 !e530 !e78*/ va_end(arglistsave); if(0 == bufSize || bufSize > SECUREC_STRING_MAX_LEN || !pDestBuf) { return; /*lint !e801*/ } *(char*)pDestBuf = '\0'; if (wideChar > 0 && bufSize >= sizeof(wchar_t)) { *(wchar_t UNALIGNED*)pDestBuf = L'\0'; } return;/*lint !e801*/ } ++fmt; /* skip to next char */ } } return; } #ifdef _UNICODE int securec_winput_s (SEC_FILE_STREAM* stream, const wchar_t* cformat, va_list arglist) #else int securec_input_s (SEC_FILE_STREAM* stream, const char* cformat, va_list arglist)/*lint !e40 !e10*/ #endif {/*lint !e10*/ unsigned long number = 0; _TCHAR floatstring[_CVTBUFSIZE + 1]; _TCHAR* pFloatStr = floatstring; const _TUCHAR* format = ( _TUCHAR*)cformat; size_t arrayWidth = 0; size_t nFloatStrUsed = 0; size_t nFloatStrSz = sizeof(floatstring) / sizeof(floatstring[0]); int malloc_FloatStrFlag = 0; #if ALLOC_TABLE char* table = NULL; int malloc_flag = 0; /*flag for whether "table" is allocated on the heap */ #else /* ALLOC_TABLE */ char AsciiTable[TABLESIZE]; char* table = AsciiTable; #endif /* ALLOC_TABLE */ #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) int longbits = sizeof(long) << 3; #endif #if _INTEGRAL_MAX_BITS >= 64 UINT64T num64 = 0; #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) int beyondMax = 0; UINT64T num64as = 0; #endif #endif void* pointer = NULL; /* points to receiving data addr */ void* start; _TUCHAR* scanptr; /* for building "table" data */ _TINT ch = 0; int charCount = 0; /* total number of chars read */ int comChr = 0; /* holds designator type */ #if defined(COMPATIBLE_LINUX_FORMAT) int oricomChr = 0; #endif int count = 0; int started = 0; int width = 0; /* width of field */ int widthSet = 0; /* user has specified width */ int errNoMem = 0; int formatError = 0; /* int is64Bits = sizeof(int*) == 8; LSD add */ int paraIsNull = 0; int longone; /* LSD change to 0 = SHORT, 1 = int, > 1 long or L_DOUBLE */ #if _INTEGRAL_MAX_BITS >= 64 int integer64 = 0; /* 1 for 64-bit integer, 0 otherwise */ #endif va_list arglistsave; /*lint !e530*/ /* backup for arglist value, this variable don't need initialized */ #if defined(va_copy) || defined(__va_copy) int arglistBeenCopied = 0; #endif #ifndef _UNICODE wchar_t wctemp = L'\0'; #endif /* _UNICODE */ _TCHAR decimal; _TUCHAR rngch; _TUCHAR last; _TUCHAR prevChar; _TCHAR tch; char fl_wchar_arg; /* flags wide char/string argument */ signed char wideChar; char reject; char negative; char suppress; char match; char doneFlag; /* general purpose loop monitor */ count = charCount = match = 0; (void)memset(&arglistsave, 0, sizeof(arglistsave));/*lint !e545*/ while (format != NULL && *format) /*lint !e944*/ { /* coverity[returned_null] */ if (isspace((_TUCHAR)*format)) /*lint !e48 !e409*/ { 
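/* Descriptive comment added for clarity (not part of the original source):
 * a whitespace character in the format string matches a run of whitespace
 * in the input, so a format such as " %d" skips any leading blanks, tabs
 * and newlines before the number is scanned.  EAT_WHITE() reads input
 * characters until the first non-space character and UN_INC() pushes that
 * character back so the next directive can consume it; the format pointer
 * is then advanced past consecutive whitespace in the format itself. */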
UN_INC(EAT_WHITE()); /* put first non-space char back */ do { tch = (_TCHAR)*(++format); } /* coverity[returned_null] */ while (isspace((_TUCHAR)tch)); /*lint !e48 !e409*/ continue; } if (_T('%') == *format) { number = 0; prevChar = 0; width = widthSet = started = 0; arrayWidth = 0; errNoMem = 0; fl_wchar_arg = 0; doneFlag = 0; suppress = 0; negative = 0; reject = 0; wideChar = 0; longone = 1; #if _INTEGRAL_MAX_BITS >= 64 #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) beyondMax = 0; #endif integer64 = 0; num64 = 0; #endif while (!doneFlag) { comChr = *(++format); /* coverity[returned_null] */ if (_IS_DIGIT((_TUCHAR)comChr)) /*lint !e48 !e409*/ { ++widthSet; width = (int)MUL10(width) + (comChr - _T('0')); /*lint !e701*/ } else { switch (comChr) { case _T('F') : case _T('N') : break; /* NEAR is default in small model */ case _T('h') : /* set longone to 0 */ --longone; --wideChar; /* set wideChar = -1 */ break; #if _INTEGRAL_MAX_BITS >= 64 case _T('I'): if ( (*(format + 1) == _T('6')) && (*(format + 2) == _T('4')) ) { format += 2; ++integer64; break; } else if ( (*(format + 1) == _T('3')) && (*(format + 2) == _T('2')) ) { format += 2; break; } else if ( (*(format + 1) == _T('d')) || (*(format + 1) == _T('i')) || (*(format + 1) == _T('o')) || (*(format + 1) == _T('x')) || (*(format + 1) == _T('X')) ) { /*lint -e506*/ if (sizeof(void*) == sizeof(INT64T)) /*lint !e774*/ { ++integer64; } break; } /*lint -e506*/ if (sizeof(void*) == sizeof(INT64T)) /*lint !e774*/ { ++integer64; } goto DEFAULT_LABEL; /*lint !e801*/ #endif /* _INTEGRAL_MAX_BITS >= 64 */ #ifdef COMPATIBLE_LINUX_FORMAT case _T('j') : longone += 2; ++integer64; break; case _T('t') : #endif case _T('z') : ++longone; #ifdef SECUREC_ON_64BITS ++longone; ++integer64; #endif break; case _T('L') : case _T('q') : longone += 2; #if LONGLONG_IS_INT64 ++integer64; #endif break; case _T('l') : if (*(format + 1) == _T('l')) { ++format; #if LONGLONG_IS_INT64 /*LSD change from #ifdef to #if*/ ++integer64; longone += 2; break; #else /* LONGLONG_IS_INT64 */ ++longone; /* NOBREAK */ #endif /* LONGLONG_IS_INT64 */ } else { ++longone; #ifdef SECUREC_ON_64BITS #if !(defined(COMPATIBLE_WIN_FORMAT)) /*on window 64 system sizeof long is 32bit */ ++integer64; #endif #endif /* NOBREAK */ } /*lint -fallthrough*/ case _T('w') : ++wideChar; /* set wideChar = 1 */ break; case _T('*') : ++suppress; break; default: DEFAULT_LABEL: ++doneFlag; break; }/* end of switch(comChr) ... 
*/ } } if (!suppress) { /* LSD change, for gcc compile arglistsave = arglist; */ #if defined(va_copy) va_copy(arglistsave, arglist); /*lint !e530 !e644*/ #elif defined(__va_copy) /*for vxworks*/ __va_copy(arglistsave, arglist); #else arglistsave = arglist; #endif pointer = va_arg(arglist, void*); /*lint !e826 !e64 !e10*/ CHECK_INPUT_ADDR(pointer); /*lint !e801*/ } else { /*"pointer = NULL" is safe, in supress mode we don't use pointer to store data*/ pointer = NULL; /* doesn't matter what value we use here - we're only using it as a flag*/ } doneFlag = 0; if (!wideChar) { /* use case if not explicitly specified */ if ((*format == _T('S')) || (*format == _T('C'))) #if defined(_UNICODE) && (defined(COMPATIBLE_WIN_FORMAT)) { --wideChar;} else { ++wideChar; } #else /* _UNICODE */ { ++wideChar;} else { --wideChar; } #endif /* _UNICODE */ } comChr = *format | (_T('a') - _T('A')); if (_T('n') != comChr) { if (_T('c') != comChr && LEFT_BRACKET != comChr) { ch = EAT_WHITE(); } else { ch = INC(); } } if (_T('n') != comChr) { if (_TEOF == ch) { goto error_return; /*lint !e801*/ } } if (!widthSet || width) { if (!suppress && (comChr == _T('c') || comChr == _T('s') || comChr == LEFT_BRACKET)) { #if defined(va_copy) va_copy(arglist, arglistsave); va_end(arglistsave); arglistBeenCopied = 1; #elif defined(__va_copy) /*for vxworks*/ __va_copy(arglist, arglistsave); va_end(arglistsave); arglistBeenCopied = 1; #else arglist = arglistsave; #endif pointer = va_arg(arglist, void*); /*lint !e826 !e64 !e10*/ CHECK_INPUT_ADDR(pointer); /*lint !e801*/ #if defined(va_copy) va_copy(arglistsave, arglist); #elif defined(__va_copy) /*for vxworks*/ __va_copy(arglistsave, arglist); #else arglistsave = arglist; #endif /* Get the next argument - size of the array in characters */ #ifdef SECUREC_ON_64BITS arrayWidth = ((size_t)(va_arg(arglist, size_t))) & 0xFFFFFFFFUL; #else /* !SECUREC_ON_64BITS */ arrayWidth = va_arg(arglist, size_t); /*lint !e826 !e48 !e516 !e530 !e78*/ #endif if (arrayWidth < 1) { if (wideChar > 0) { *(wchar_t UNALIGNED*)pointer = L'\0'; } else { *(char*)pointer = '\0'; } goto error_return; /*lint !e801*/ } /*LSD add string maxi width protection*/ if (wideChar > 0 ) { if (arrayWidth > SECUREC_WCHAR_STRING_MAX_LEN) { goto error_return; /*lint !e801*/ } } else { /*for char* buffer*/ if ( arrayWidth > SECUREC_STRING_MAX_LEN) { goto error_return; /*lint !e801*/ } } } #if defined(COMPATIBLE_LINUX_FORMAT) oricomChr = comChr; #endif switch (comChr) { case _T('c'): /* case _T('C'): */ if (!widthSet) { ++widthSet; ++width; } if (wideChar > 0) { fl_wchar_arg++; } goto scanit; /*lint !e801*/ case _T('s'): /* case _T('S'): */ if (wideChar > 0) { fl_wchar_arg++; } goto scanit; /*lint !e801*/ case LEFT_BRACKET : /* scanset */ #if !(defined(COMPATIBLE_WIN_FORMAT)) if (_T('{') == *format) { goto error_return; /*lint !e801*/ } #endif if (wideChar > 0) { fl_wchar_arg++; } scanptr = (_TUCHAR*)(++format); if (_T('^') == *scanptr) { ++scanptr; --reject; /* set reject to 255 */ } /* Allocate "table" on first %[] spec */ #if ALLOC_TABLE if (table == NULL) { /*LSD the table will be freed after error_return label of this function */ table = (char*)SECC_MALLOC(TABLESIZE); if ( table == NULL) { goto error_return; /*lint !e801*/ } malloc_flag = 1; } #endif /* ALLOC_TABLE */ (void)memset_s(table, TABLESIZE, 0, TABLESIZE); if (LEFT_BRACKET == comChr) /*lint !e774*/ { if (_T(']') == *scanptr) { prevChar = _T(']'); ++scanptr; table[ _T(']') >> 3] = 1 << (_T(']') & 7); } } /*2014 3.14 LSD add _T('\0') != *scanptr*/ while (_T('\0') != 
*scanptr && _T(']') != *scanptr) { rngch = *scanptr++; if (_T('-') != rngch || !prevChar || /* first char */ _T(']') == *scanptr) /* last char */ { table[(prevChar = rngch) >> 3] |= 1 << (rngch & 7); /*lint !e734 !e702*/ } else { /* handle a-z type set */ rngch = *scanptr++; /* get end of range */ if (prevChar < rngch) /* %[a-z] */ { last = rngch; } else { #if (defined(COMPATIBLE_WIN_FORMAT)) /* %[z-a] */ last = prevChar; prevChar = rngch; #else table[ '-' >> 3] |= 1 << ('-' & 7); /*lint !e734 !e702*/ table[(prevChar = rngch) >> 3] |= 1 << (rngch & 7); /*lint !e734 !e702*/ continue; #endif } for (rngch = prevChar; rngch <= last; ++rngch) { table[rngch >> 3] |= 1 << (rngch & 7); /*lint !e734 !e702*/ } prevChar = 0; } } if (!*scanptr) { if (arrayWidth >= sizeof(_TCHAR) && pointer) /*LSD add "&& pointer"*/ { *(_TCHAR*)pointer = _T('\0'); /*lint !e613*/ } goto error_return; /* trunc'd format string */ /*lint !e801*/ } /* scanset completed. Now read string */ if (LEFT_BRACKET == comChr) /*lint !e774*/ { format = scanptr; } scanit: start = pointer; UN_INC(ch); /* One element is needed for '\0' for %s & %[ */ if (comChr != _T('c')) { --arrayWidth; } while ( !widthSet || width-- ) { ch = INC(); if ( (_TEOF != ch) && /* char conditions*/ ( ( comChr == _T('c')) || /* string conditions !isspace()*/ ( ( comChr == _T('s') && (!(ch >= _T('\t') && ch <= _T('\r')) && ch != _T(' ')))) || /* BRACKET conditions*/ ( (comChr == LEFT_BRACKET) && table && /*LSD add && table is NOT NULL condition*/ ((table[(size_t)ch >> 3] ^ reject) & (1 << (ch & 7))) ) ) ) { if (!suppress) { if (!arrayWidth) { /* We have exhausted the user's buffer */ errNoMem = 1; break; } CHECK_INPUT_ADDR(pointer); /*lint !e801*/ #ifndef _UNICODE if (fl_wchar_arg) { char temp[MULTI_BYTE_MAX_LEN]; int di = 1; #if !defined(COMPATIBLE_WIN_FORMAT) int convRes = 0; #endif wctemp = L'?'; /* set default char as ?*/ (void)memset(temp, 0, MULTI_BYTE_MAX_LEN); temp[0] = (char) ch; if ( isleadbyte((UINT8T)ch)) { #if defined(COMPATIBLE_WIN_FORMAT) temp[1] = (char) INC(); #else /*in Linux like system, the string is encoded in UTF-8*/ while (di < (int)MB_CUR_MAX) /*lint !e681*/ { temp[di++] = (char) INC(); if ( (convRes = mbtowc(&wctemp, temp, (size_t)MB_CUR_MAX)) > 0 ) { break; /*convert succeed*/ } } #endif } if (di == 1) { /*for windows system*/ if (mbtowc(&wctemp, temp, (size_t)MB_CUR_MAX) <= 0 ) /*no string termination error for Fortify*/ { wctemp = L'?'; } } #if !(defined(COMPATIBLE_WIN_FORMAT)) else /*di > 1*/ { if (convRes <= 0 ) { wctemp = L'?'; } } #endif *(wchar_t UNALIGNED*)pointer = wctemp; /* just copy L'?' 
if mbtowc fails, errno is set by mbtowc */ pointer = (wchar_t*)pointer + 1; --arrayWidth; } else #else /* _UNICODE */ if (fl_wchar_arg) { *(wchar_t UNALIGNED*)pointer = (wchar_t)ch; pointer = (wchar_t*)pointer + 1; --arrayWidth; } else #endif /* _UNICODE */ { #ifndef _UNICODE *(char*)pointer = (char)ch; pointer = (char*)pointer + 1; --arrayWidth; #else /* _UNICODE */ int temp = 0; /* convert wide to multibyte */ if (arrayWidth >= ((size_t)MB_CUR_MAX)) { MASK_MSVC_SECURE_CRT_WARNING temp = wctomb((char*)pointer, (wchar_t)ch); END_MASK_MSVC_SECURE_CRT_WARNING } else { char tmpbuf[MB_LEN_MAX]; MASK_MSVC_SECURE_CRT_WARNING temp = wctomb(tmpbuf, (wchar_t)ch); END_MASK_MSVC_SECURE_CRT_WARNING if (temp > 0 && ((size_t)temp) > arrayWidth) { errNoMem = 1; break; } if (temp > 0) { (void)memcpy_s(pointer, arrayWidth, tmpbuf, (size_t)temp); } } if (temp > 0) { /* do nothing if wctomb fails*/ pointer = (char*)pointer + temp; arrayWidth -= temp; /*lint !e737*/ } #endif /* _UNICODE */ } } /* suppress */ else { /* just indicate a match */ start = (_TCHAR*)start + 1; /*lint !e613 */ } } else { UN_INC(ch); break; } } if (errNoMem) { /* In case of error, blank out the input buffer */ if (fl_wchar_arg) { if (start ) { *(wchar_t UNALIGNED*)start = 0; } } else { if (start ) { *(char*)start = 0; /*LSD add if (start )*/ } } goto error_return; /*lint !e801*/ } if (start != pointer) { if (!suppress) { ++count; CHECK_INPUT_ADDR(pointer); /*lint !e801*/ if ('c' != comChr) { /* null-terminate strings */ if (fl_wchar_arg) { *(wchar_t UNALIGNED*)pointer = L'\0'; } else { *(char*)pointer = '\0'; } } } /*remove blank else*/ /*NULL*/ } else { goto error_return; /*lint !e801*/ } break; case _T('i') : /* could be d, o, or x */ comChr = _T('d'); /* use as default */ /*lint -fallthrough*/ case _T('x'): if (_T('-') == ch) { ++negative; goto x_incwidth; /*lint !e801*/ } else if (_T('+') == ch) { x_incwidth: if (!--width && widthSet) { ++doneFlag; } else { ch = INC(); } } if (_T('0') == ch) { if (_T('x') == (_TCHAR)(ch = INC()) || _T('X') == (_TCHAR)ch) { ch = INC(); if (widthSet) { width -= 2; if (width < 1) { ++doneFlag; } } comChr = _T('x'); } else { ++started; if (_T('x') != comChr) { if (widthSet && !--width) { ++doneFlag; } comChr = _T('o'); } else { UN_INC(ch); ch = _T('0'); } } } goto getnum; /*lint !e801*/ /* NOTREACHED */ /*lint -fallthrough*/ case _T('p') : /* force %hp to be treated as %p */ longone = 1; #ifdef SECUREC_ON_64BITS /* force %p to be 64 bit in 64bits system */ ++integer64; num64 = 0; #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) beyondMax = 0; #endif #endif /*lint -fallthrough*/ case _T('o') : case _T('u') : case _T('d') : if (_T('-') == ch) { ++negative; goto d_incwidth; /*lint !e801*/ } else if (_T('+') == ch) { d_incwidth: if (!--width && widthSet) { ++doneFlag; } else { ch = INC(); } } getnum: #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) { while (!doneFlag) { if (_T('x') == comChr || _T('p') == comChr) /* coverity[returned_null] */ if (_IS_XDIGIT(ch)) /*lint !e48 !e409*/ { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if((num64 >> 60) > 0) beyondMax = 1; num64as = 16; #endif num64 <<= 4; /*coverity[negative_return_fn]*/ /* coverity[negative_returns] */ ch = _hextodec((_TCHAR)ch); } else { ++doneFlag; } /* coverity[returned_null] */ else if (_IS_DIGIT(ch)) /*lint !e48 !e409*/ if (_T('o') == comChr) if (_T('8') > ch) { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if((num64 >> 61) > 0) beyondMax = 1; num64as = 8; #endif num64 <<= 3; } else { ++doneFlag; } 
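/* Descriptive comment added for clarity (not part of the original source):
 * the decimal branch below accumulates the value as num64 = MUL10(num64)
 * plus the new digit, where MUL10(x) computes 10 * x with shifts and adds,
 * ((x << 2) + x) << 1.  In the COMPATIBLE_LINUX_FORMAT build it first
 * compares num64 with MAX_64BITS_VALUE_DIV_TEN (ULLONG_MAX / 10) and, at
 * the borderline value, also checks whether the final digit still fits;
 * when the result would overflow 64 bits, beyondMax is set so the value
 * can be saturated after the parsing loop. */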
else /* _T('d') == comChr */ { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if (num64 > MAX_64BITS_VALUE_DIV_TEN) { beyondMax = 1; } #endif num64 = MUL10(num64); #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if (num64 == MAX_64BITS_VALUE_CUT_LAST_DIGIT) { num64as = MAX_64BITS_VALUE - num64; if (num64as < (UINT64T)(ch - _T('0'))) /*lint !e574 !e571*/ { beyondMax = 1; } } #endif } else { ++doneFlag; } if (!doneFlag) { ++started; num64 += ch - _T('0'); /*lint !e737*/ if (widthSet && !--width) { ++doneFlag; } else { ch = INC(); } } else { UN_INC(ch); } } /* end of WHILE loop */ if (negative) #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) { if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { if (num64 > MIN_64BITS_NEG_VALUE) { num64 = MIN_64BITS_NEG_VALUE; } else { num64 = (UINT64T )(-(INT64T)num64); } if(beyondMax) { num64 = MIN_64BITS_NEG_VALUE; } } else /* o, u, x, X, p */ { num64 = (UINT64T )(-(INT64T)num64); if (beyondMax) { num64 = MAX_64BITS_VALUE; } } } else { if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { if (num64 > MAX_64BITS_POS_VALUE) { num64 = MAX_64BITS_POS_VALUE; } if (beyondMax) { num64 = MAX_64BITS_POS_VALUE; } } else { if (beyondMax) { num64 = MAX_64BITS_VALUE; } } } #else { #if defined(__hpux) if (_T('p') != oricomChr) #endif num64 = (UINT64T )(-(INT64T)num64); } #endif } else { #endif /* _INTEGRAL_MAX_BITS >= 64 */ #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) unsigned long decimalEdge = MAX_32BITS_VALUE_DIV_TEN; #endif while (!doneFlag) { if (_T('x') == comChr || _T('p') == comChr) { if (_IS_XDIGIT(ch)) /*lint !e48 !e409*/ { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if((number >> (longbits - 4)) > 0) { beyondMax = 1; } #endif number = (number << 4); ch = _hextodec((_TCHAR)ch); } else { ++doneFlag; } } else if (_IS_DIGIT(ch)) /*lint !e48 !e409*/ { if (_T('o') == comChr) { if (_T('8') > ch) { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if((number >> (longbits - 3)) > 0) { beyondMax = 1; } #endif number = (number << 3); } else { ++doneFlag; } } else /* _T('d') == comChr */ { #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) #ifdef SECUREC_ON_64BITS if (longbits == 64) { decimalEdge = (unsigned long)MAX_64BITS_VALUE_DIV_TEN; } #else if (longbits == 32) { decimalEdge = MAX_32BITS_VALUE_DIV_TEN; } #endif if (number > decimalEdge) { beyondMax = 1; } #endif number = MUL10(number); #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) if (number == MUL10(decimalEdge)) { num64as = (unsigned long)MAX_64BITS_VALUE - number; if (num64as < (UINT64T)(ch - _T('0'))) /*lint !e574 !e571*/ { beyondMax = 1; } } #endif } } else { ++doneFlag; } if (!doneFlag) { ++started; number += ch - _T('0'); /*lint !e737*/ if (widthSet && !--width) { ++doneFlag; } else { ch = INC(); } } else { UN_INC(ch); } } /* end of WHILE loop */ if (negative) #if (defined(COMPATIBLE_LINUX_FORMAT) && !(defined(__UNIX))) { if(1 == longone) { if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { #ifdef SECUREC_ON_64BITS if (longbits == 64) { if ((number > MIN_64BITS_NEG_VALUE)) { number = 0; } else { number = (unsigned int)(-(int)number); } } #else if (longbits == 32) { if ((number > MIN_32BITS_NEG_VALUE)) { number = MIN_32BITS_NEG_VALUE; } else { number = (unsigned int)(-(int)number); } } #endif if (beyondMax) { #ifdef SECUREC_ON_64BITS if (64 == longbits) { number = 0; } #else if (32 == longbits) { number = MIN_32BITS_NEG_VALUE; } #endif } } else /* o, u, x, X ,p */ { #ifdef SECUREC_ON_64BITS 
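/* Descriptive comment added for clarity (not part of the original source):
 * on a 64-bit build, a '-' sign in front of a default-width %o, %u, %x or
 * %p field whose magnitude already exceeds 2^32 (MAX_32BITS_VALUE_INC) is
 * clamped to 0xFFFFFFFF (MAX_32BITS_VALUE) instead of being negated;
 * smaller magnitudes are negated as a 32-bit int just below. */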
if (number > MAX_32BITS_VALUE_INC) { number = MAX_32BITS_VALUE; } else #endif { number = (unsigned int)(-(int)number); } if (beyondMax) { number |= (unsigned long)0xffffffffffffffffULL; } } } else { if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { if (number > (unsigned long)(1ULL << (longbits - 1))) { number = (unsigned long)(1ULL << (longbits - 1)); } else { number = (unsigned long)(-(long)number); } } else { number = (unsigned long)(-(long)number); if (beyondMax) { number |= (unsigned long)0xffffffffffffffffULL; } } } if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { if (((beyondMax) && (-1 == longone)) || ((beyondMax) && ( 0 == longone)) || ((beyondMax) && ( 1 == longone) && (64 == longbits))) { number = 0; } if ((beyondMax) && (2 == longone)) { number = ((unsigned long)(1UL << (longbits - 1))); } } else /* o, u, x, X, p */ { if (beyondMax) { number |= (unsigned long)0xffffffffffffffffULL; } } } else { if(1 == longone) { if ((_T('d') == oricomChr) || (_T('i') == oricomChr)) { #ifdef SECUREC_ON_64BITS if (longbits == 64) { if (number > MAX_64BITS_POS_VALUE) { number |= (unsigned long)0xffffffffffffffffULL; } } if ((beyondMax) && (64 == longbits)) { number |= (unsigned long)0xffffffffffffffffULL; } #else if (longbits == 32) { if (number > MAX_32BITS_POS_VALUE) { number = MAX_32BITS_POS_VALUE; } } if ((beyondMax) && (32 == longbits)) { number = MAX_32BITS_POS_VALUE; } #endif } else /* o,u,x,X,p */ { if(beyondMax) { number = MAX_32BITS_VALUE; } } } else { if ((_T('d') == oricomChr) || (_T('i') == oricomChr) ) { if (number > ((unsigned long)(1UL << (longbits - 1)) - 1)) { number = ((unsigned long)(1UL << (longbits - 1)) - 1); } if(((beyondMax) && (-1 == longone)) || ((beyondMax) && ( 0 == longone)) || ((beyondMax) && ( 1 == longone) && (64 == longbits))) { number |= (unsigned long)0xffffffffffffffffULL; } if((beyondMax) && (2 == longone)) { number = ((unsigned long)(1UL << (longbits - 1)) - 1); } } else { if(beyondMax) { number |= (unsigned long)0xffffffffffffffffULL; } } } } #else { #if defined(__hpux) if (_T('p') != oricomChr) #endif number = (unsigned long)(-(long)number); } #endif #if _INTEGRAL_MAX_BITS >= 64 } #endif /* _INTEGRAL_MAX_BITS >= 64 */ if (_T('F') == comChr) /* expected ':' in long pointer */ { started = 0; } if (started) { if (!suppress) { ++count; assign_num: CHECK_INPUT_ADDR(pointer); /*lint !e801*/ #if _INTEGRAL_MAX_BITS >= 64 if ( integer64 ) { #if defined(SECUREC_VXWORKS_PLATFORM) *(INT64T UNALIGNED*)pointer = *(UINT64T*)(&num64); /*lint !e713, take num64 as unsigned number*/ #else *(INT64T UNALIGNED*)pointer = (UINT64T)num64; /*lint !e713, take num64 as unsigned number*/ #endif } else #endif { if (longone > 1) { *(long UNALIGNED*)pointer = (unsigned long)number; /*lint !e713, take number as unsigned number*/ } else if (longone == 1) { *(int UNALIGNED*)pointer = (int)number; } else if (longone == 0) { *(short UNALIGNED*)pointer = (unsigned short)number; /*lint !e713, take number as unsigned number*/ } else /* < 0 for hh format modifier*/ { *(char UNALIGNED*)pointer = (unsigned char)number; /*lint !e713, take number as unsigned number*/ } } } /* remove blank else*/ /*NULL*/ } else { goto error_return; /*lint !e801*/ } break; case _T('n') : /* char count*/ number = charCount; /*lint !e732*/ if (!suppress) { goto assign_num; /*lint !e801*/ } break; case _T('e') : /* case _T('E') : */ case _T('f') : case _T('g') : /* scan a float */ /* case _T('G') : */ nFloatStrUsed = 0; if (_T('-') == ch) { pFloatStr[nFloatStrUsed++] = _T('-'); goto f_incwidth; /*lint 
!e801*/ } else if (_T('+') == ch) { f_incwidth: --width; ch = INC(); } if (!widthSet) /* must care width */ { width = -1; } /* now get integral part */ /* coverity[returned_null] */ while (_IS_DIGIT(ch) && width--) /*lint !e48 !e409*/ { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; /* ch must be '0' - '9'*/ if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } ch = INC(); } #ifdef _UNICODE /* convert decimal point(.) to wide-char */ decimal = L'.'; if (mbtowc(&decimal, DECIMAL_POINT_PTR, (size_t)MB_CUR_MAX) <= 0) { decimal = L'.'; } #else /* _UNICODE */ decimal = *DECIMAL_POINT_PTR; /*if locale.h NOT exist, let decimal = '.' */ #endif /* _UNICODE */ /* now check for decimal */ if (decimal == (char)ch && width--) { ch = INC(); pFloatStr[nFloatStrUsed++] = decimal; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } /* coverity[returned_null] */ while (_IS_DIGIT(ch) && width--) /*lint !e48 !e409*/ { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } ch = INC(); } } /* now check for exponent */ if (started && (_T('e') == ch || _T('E') == ch) && width--) { pFloatStr[nFloatStrUsed++] = _T('e'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } if (_T('-') == (ch = INC())) { pFloatStr[nFloatStrUsed++] = _T('-'); if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } goto f_incwidth2; /*lint !e801*/ } else if (_T('+') == ch) { f_incwidth2: if (!width--) { ++width; } else { ch = INC(); } } /* coverity[returned_null] */ while (_IS_DIGIT(ch) && width--) /*lint !e48 !e409*/ { ++started; pFloatStr[nFloatStrUsed++] = (_TCHAR)ch; if (__check_float_string(nFloatStrUsed, &nFloatStrSz, &pFloatStr, floatstring, &malloc_FloatStrFlag ) == 0) { goto error_return; /*lint !e801*/ } ch = INC(); } } UN_INC(ch); if (started) { if (!suppress) { ++count; pFloatStr[nFloatStrUsed] = _T('\0'); #ifdef _UNICODE { /* convert float string */ size_t cfslength; char* cfloatstring; cfslength = (size_t)(nFloatStrSz + 1) * WCHAR_SIZE; if ((cfloatstring = (char*)SECC_MALLOC(cfslength)) == NULL) { goto error_return; /*lint !e801*/ } if (wcstombs (cfloatstring, pFloatStr, cfslength - 1) > 0) { /*LSD from wcstombs_s tp wcstombs add >0*/ _safecrt_fassign( longone - 1, (char*)pointer , cfloatstring, (char)decimal); } else { SECC_FREE(cfloatstring); /*lint !e516*/ goto error_return; /*lint !e801*/ } SECC_FREE(cfloatstring); /*lint !e516*/ } #else /* _UNICODE */ _safecrt_fassign( longone - 1, (char*)pointer , pFloatStr, (char)decimal); #endif /* _UNICODE */ } /* remove blank else */ /*NULL */ } else { goto error_return; /*lint !e801*/ } break; default: /* either found '%' or something else */ if ((int)*format != (int)ch) { UN_INC(ch); /* error_return ASSERT's if formatError is true */ formatError = 1; goto error_return; /*lint !e801*/ } else { match--; /* % found, compensate for inc below */ } if (!suppress) { #if defined(va_copy) va_copy(arglist, arglistsave); arglistBeenCopied = 1; va_end(arglistsave); #elif defined(__va_copy) /*for vxworks*/ __va_copy(arglist, arglistsave); arglistBeenCopied = 1; va_end(arglistsave); 
#else arglist = arglistsave; #endif } } /* SWITCH */ match++; /* matched a format field - set flag */ } /* WHILE (width) */ else { /* zero-width field in format string */ UN_INC(ch); goto error_return; /*lint !e801*/ } ++format; /* skip to next char */ } else /* ('%' != *format) */ { if ((int)*format++ != (int)(ch = INC())) { UN_INC(ch); goto error_return; /*lint !e801*/ } #ifndef _UNICODE if (isleadbyte((UINT8T)ch)) { int ch2; char temp[MULTI_BYTE_MAX_LEN]; if ((int)*format++ != (ch2 = INC())) { UN_INC(ch2); /*LSD in console mode, UN_INC twice will cause problem */ UN_INC(ch); goto error_return; /*lint !e801*/ } if (MB_CUR_MAX > 2 && (((UINT8T)ch & 0xE0) == 0xE0) && (((UINT8T)ch2 & 0x80) == 0x80) ) { /*this char is very likely to be a UTF-8 char*/ int ch3 = INC(); temp[0] = (char)ch; temp[1] = (char)ch2; temp[2] = (char)ch3; if (mbtowc(&wctemp, temp, (size_t)MB_CUR_MAX) > 0 ) { /*succeed*/ if ((int)*format++ != (int)ch3) { UN_INC(ch3); goto error_return; /*lint !e801*/ } --charCount; } else { UN_INC(ch3); } } --charCount; /* only count as one character read */ } #endif /* _UNICODE */ } if ( (_TEOF == ch) && ((*format != _T('%')) || (*(format + 1) != _T('n'))) ) { break; } } /*END while (*format) */ error_return: #if ALLOC_TABLE if (malloc_flag == 1) { SECC_FREE(table); /*lint !e668 !e516*/ } #endif /* ALLOC_TABLE */ #if defined(va_copy) || defined(__va_copy) if (arglistBeenCopied) { va_end(arglist); } #endif va_end(arglistsave); /*LSD 2014.3.6 add, clear the stack data*/ (void)memset_s(floatstring, (_CVTBUFSIZE + 1) * sizeof(_TCHAR), 0 , (_CVTBUFSIZE + 1) * sizeof(_TCHAR) ); if (malloc_FloatStrFlag == 1) { /* pFloatStr can be alloced in __check_float_string function, clear and free it*/ (void)memset_s(pFloatStr, nFloatStrSz * sizeof(_TCHAR), 0 , nFloatStrSz * sizeof(_TCHAR) ); SECC_FREE(pFloatStr); /*lint !e586 !e424 */ } if (stream != NULL && (stream->_flag & FILE_STREAM_FLAG) && stream->_base != NULL) { if ( (0 == stream->_cnt) && feof(stream->pf)) /*lint !e611*/ { /* file pointer at the end of file, don't need to seek back */ stream->_base[0] = '\0'; } else { /*LSD seek to original position, bug fix 2014 1 21*/ if ( fseek(stream->pf, stream->oriFilePos, SEEK_SET) ) /*lint !e613*/ { /*seek failed, ignore it*/ stream->oriFilePos = 0; } else { if ( stream->fileRealRead > 0 ) /*LSD bug fix. when file reach to EOF, don't seek back*/ { #if (defined(COMPATIBLE_WIN_FORMAT)) int loops = 0; for( loops = 0; loops < (stream->fileRealRead / BUFFERED_BLOK_SIZE); ++loops ) { if (BUFFERED_BLOK_SIZE != fread(stream->_base, 1, BUFFERED_BLOK_SIZE, stream->pf)) { break; } } if ( 0 != (stream->fileRealRead % BUFFERED_BLOK_SIZE)) { (void)fread(stream->_base, (stream->fileRealRead % BUFFERED_BLOK_SIZE), 1, stream->pf); if (ftell(stream->pf) < stream->oriFilePos + stream->fileRealRead) /*lint !e613*/ { (void)fseek(stream->pf, stream->oriFilePos + stream->fileRealRead, SEEK_SET); } } #else /* in linux like system*/ if ( fseek(stream->pf, stream->oriFilePos + stream->fileRealRead, SEEK_SET) ) /*lint !e613*/ { /*seek failed, ignore it*/ stream->oriFilePos = 0; } #endif } } } SECC_FREE(stream->_base); /*lint !e613 !e586 !e516*/ } if (_TEOF == ch) { /* If any fields were matched or assigned, return count */ /* coverity[missing_va_end] */ return ( (count || match) ? 
count : EOF); /*lint !e593 */ } else if (formatError == 1) { /*Invalid Input Format*/ /* coverity[missing_va_end] */ return -2; /*lint !e593 */ } else if (paraIsNull) { /* coverity[missing_va_end] */ return -2; /*lint !e593 */ } /* coverity[missing_va_end] */ /* coverity[leaked_storage] */ return count; /*lint !e593 */ } static _TINT _hextodec ( _TCHAR chr)/*lint !e129 !e10 */ { /* coverity[returned_null] */ return _IS_DIGIT(chr) ? chr : ((chr & ~(_T('a') - _T('A'))) - _T('A')) + 10 + _T('0');/*lint !e40 !e10 !e48 !e409*/ } #ifdef _UNICODE #define _gettc_nolock getWch____ #define _UN_T_INC _un_w_inc #else #define _gettc_nolock getCh____ #define _UN_T_INC _un_c_inc #endif #ifdef _UNICODE static _TINT getWch____(SEC_FILE_STREAM* str)/*lint !e1075 !e129 !e10 */ #else static _TINT getCh____(SEC_FILE_STREAM* str)/*lint !e1075 !e129 !e10 */ #endif { _TINT ch = 0; int firstReadOnFile = 0; do { if ((str->_flag & FROM_STDIN_FLAG) > 0 ) { if (str->fUnget) { ch = (_TINT)str->lastChar; str->fUnget = 0; } else { ch = _tfgetc(stdin); str->lastChar = (unsigned int)ch; /*lint !e732*/ } break; } if ((str->_flag & FILE_STREAM_FLAG) > 0 && str->_cnt == 0 ) { /*load file to buffer*/ if (!str->_base ) { str->_base = (char*)SECC_MALLOC(BUFFERED_BLOK_SIZE + 1);/*lint !e586*/ if (!str->_base ) { ch = _TEOF; break; } str->_base[BUFFERED_BLOK_SIZE] = '\0';/* for fortify Warning STRING_NULL*/ } /*LSD add 2014.3.21*/ if (UNINITIALIZED_FILE_POS == str->oriFilePos) { str->oriFilePos = ftell(str->pf); /* save original file read position*/ firstReadOnFile = 1; } str->_cnt = (int)fread(str->_base, 1, BUFFERED_BLOK_SIZE, str->pf); str->_base[BUFFERED_BLOK_SIZE] = '\0'; /* for fortify Warning STRING_NULL*/ if (0 == str->_cnt || str->_cnt > BUFFERED_BLOK_SIZE) { ch = _TEOF; break; } str->_ptr = str->_base; str->_flag |= LOAD_FILE_TO_MEM_FLAG; if (firstReadOnFile) { #ifdef _UNICODE /*LSD fix 2014.3.24, add (UINT8T) to (str->_base[0])*/ if ( str->_cnt > 1 \ && (((UINT8T)(str->_base[0]) == 0xFFU && (UINT8T)(str->_base[1]) == 0xFEU) \ || ((UINT8T)(str->_base[0]) == 0xFEU && (UINT8T)(str->_base[1]) == 0xFFU))) { /*it's BOM header, UNICODE little endian*/ str->_cnt -= BOM_HEADER_SIZE; if(0 != memmove_s(str->_base,(size_t)BUFFERED_BLOK_SIZE,str->_base + BOM_HEADER_SIZE,(size_t)str->_cnt)) { ch = _TEOF; break; } if(str->_cnt % (int)sizeof(_TCHAR)) { /*the str->_cnt must be a multiple of sizeof(_TCHAR),otherwise this function will return _TEOF when read the last character */ int ret = 0; ret = (int)fread(str->_base + str->_cnt, 1, BOM_HEADER_SIZE, str->pf); if (ret > 0 && ret <= BUFFERED_BLOK_SIZE) { str->_cnt += ret; } } } #else if ( str->_cnt > 2 && (UINT8T)(str->_base[0]) == 0xEFU && (UINT8T)(str->_base[1]) == 0xBBU && (UINT8T)(str->_base[2]) == 0xBFU) { /*it's BOM header, little endian*/ str->_cnt -= UTF8_BOM_HEADER_SIZE; str->_ptr += UTF8_BOM_HEADER_SIZE; } #endif } } if ((str->_flag & MEM_STR_FLAG) > 0 || (str->_flag & LOAD_FILE_TO_MEM_FLAG) > 0 ) { ch = (_TINT)((str->_cnt -= (int)sizeof(_TCHAR)) >= 0 ? 
(_tCharMask & * ((_TCHAR*)str->_ptr)) : _TEOF); /*lint !e826*/ str->_ptr += sizeof(_TCHAR); } } while (0); /*lint !e717, use break in do-while to skip some code*/ if (_TEOF != ch && (str->_flag & FILE_STREAM_FLAG) > 0 && str->_base) { str->fileRealRead += (int)sizeof(_TCHAR); } return ch; } static _TINT _inc(SEC_FILE_STREAM* fileptr)/*lint !e1075 !e129 !e10 */ { return (_gettc_nolock(fileptr)); } /*FIXIT _un_inc w version*/ #ifndef TRUE #define TRUE 1 #endif #ifdef _UNICODE static void _un_w_inc(_TINT chr, SEC_FILE_STREAM* str) #else static void _un_c_inc(_TINT chr, SEC_FILE_STREAM* str)/*lint !e1075 !e129 !e10 */ #endif { if (_TEOF != chr) { if ((str->_flag & FROM_STDIN_FLAG) > 0) { str->lastChar = (unsigned int)chr; str->fUnget = TRUE; } else if ( (str->_flag & MEM_STR_FLAG) || (str->_flag & LOAD_FILE_TO_MEM_FLAG) > 0) { if (str->_ptr > str->_base) /*lint !e946*/ { str->_ptr -= sizeof(_TCHAR); str->_cnt += (int)sizeof(_TCHAR); } } if ( (str->_flag & FILE_STREAM_FLAG) > 0 && str->_base) { str->fileRealRead -= (int)sizeof(_TCHAR); /*LSD fix, change from -- str->fileRealRead to str->fileRealRead -= sizeof(_TCHAR). 2014.2.21*/ } } } static void _un_inc(_TINT chr, SEC_FILE_STREAM* str)/*lint !e1075 !e129 !e10 */ { _UN_T_INC(chr, str); } static _TINT _whiteout(int* counter, SEC_FILE_STREAM* fileptr)/*lint !e1075 !e129 !e601 !e40 !e10 */ {/*lint !e578 */ _TINT ch;/*lint !e1075 !e522 !e10 */ do { ++*counter; ch = _inc(fileptr);/*lint !e40 !e10 !e63 !e530 */ if (ch == _TEOF)/*lint !e40 */ { break; } } #ifdef _UNICODE while (iswspace((wint_t)ch)); #else /* coverity[returned_null] */ while (isspace((_TUCHAR)ch));/*lint !e40 !e26 !e10 !e48 !e409*/ #endif return ch;/*lint !e40 */ } #endif /*__INPUT_INL__5D13A042_DC3F_4ED9_A8D1_882811274C27*/ UVP-Tools-2.2.0.316/uvp-monitor/securec/src/memcpy_s.c000066400000000000000000000257061314037446600223140ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: memcpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * memcpy_s * * * errno_t memcpy_s(void *dest, size_t destMax, const void *src, size_t count); * * * memcpy_s copies count bytes from src to dest * * * dest new buffer. * destMax Size of the destination buffer. * src Buffer to copy from. * count Number of characters to copy * * * dest buffer is updated. * * * EOK Success * EINVAL dest == NULL or strSrc == NULL * ERANGE count > destMax or destMax > * SECUREC_MEM_MAX_LEN or destMax == 0 * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped * * if an error occured, dest will be filled with 0. * If the source and destination overlap, the behavior of memcpy_s is undefined. * Use memmove_s to handle overlapping regions. ******************************************************************************* */ /* assembly language memcpy for X86 or MIPS */ extern void* memcpy_opt(void* dest, const void* src, size_t n); /* if USE_ASM macro is enabled, it will call assembly language function to improve performance. 
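   (Illustrative note added for clarity; it is not part of the original
   source.  In this implementation, when SECUREC_MEMCOPY_WITH_PERFORMANCE is
   defined, copies larger than SECURE_MEMCOPY_THRESHOLD_SIZE, 64 bytes by
   default, are delegated to memcpy or memcpy_opt, while smaller copies are
   expanded inline: when both source and destination are 8-byte aligned the
   copy is done with fixed-size struct assignments, otherwise it falls back
   to a byte-by-byte switch.)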
*/ #ifndef SECURE_MEMCOPY_THRESHOLD_SIZE #define SECURE_MEMCOPY_THRESHOLD_SIZE (64UL) #endif #define SC_ADDR_ALIGNED(addr) !((size_t)addr & 7) #define SECURE_COPY_STRUCT(num) case num:*(MY_STR##num *)dest=*(MY_STR##num *) src;break; #define SECURE_COPY_BY_BYTE(caseNum) case caseNum: *pcDest++ = *pcSrc++; #define SMALL_MEM_COPY \ if (SC_ADDR_ALIGNED(dest) && SC_ADDR_ALIGNED(src) ) \ { \ /*use struct assignment*/ \ switch(count)\ { \ SECURE_COPY_STRUCT(1)\ SECURE_COPY_STRUCT(2)\ SECURE_COPY_STRUCT(3)\ SECURE_COPY_STRUCT(4)\ SECURE_COPY_STRUCT(5)\ SECURE_COPY_STRUCT(6)\ SECURE_COPY_STRUCT(7)\ SECURE_COPY_STRUCT(8)\ SECURE_COPY_STRUCT(9)\ SECURE_COPY_STRUCT(10)\ SECURE_COPY_STRUCT(11)\ SECURE_COPY_STRUCT(12)\ SECURE_COPY_STRUCT(13)\ SECURE_COPY_STRUCT(14)\ SECURE_COPY_STRUCT(15)\ SECURE_COPY_STRUCT(16)\ SECURE_COPY_STRUCT(17)\ SECURE_COPY_STRUCT(18)\ SECURE_COPY_STRUCT(19)\ SECURE_COPY_STRUCT(20)\ SECURE_COPY_STRUCT(21)\ SECURE_COPY_STRUCT(22)\ SECURE_COPY_STRUCT(23)\ SECURE_COPY_STRUCT(24)\ SECURE_COPY_STRUCT(25)\ SECURE_COPY_STRUCT(26)\ SECURE_COPY_STRUCT(27)\ SECURE_COPY_STRUCT(28)\ SECURE_COPY_STRUCT(29)\ SECURE_COPY_STRUCT(30)\ SECURE_COPY_STRUCT(31)\ SECURE_COPY_STRUCT(32)\ SECURE_COPY_STRUCT(33)\ SECURE_COPY_STRUCT(34)\ SECURE_COPY_STRUCT(35)\ SECURE_COPY_STRUCT(36)\ SECURE_COPY_STRUCT(37)\ SECURE_COPY_STRUCT(38)\ SECURE_COPY_STRUCT(39)\ SECURE_COPY_STRUCT(40)\ SECURE_COPY_STRUCT(41)\ SECURE_COPY_STRUCT(42)\ SECURE_COPY_STRUCT(43)\ SECURE_COPY_STRUCT(44)\ SECURE_COPY_STRUCT(45)\ SECURE_COPY_STRUCT(46)\ SECURE_COPY_STRUCT(47)\ SECURE_COPY_STRUCT(48)\ SECURE_COPY_STRUCT(49)\ SECURE_COPY_STRUCT(50)\ SECURE_COPY_STRUCT(51)\ SECURE_COPY_STRUCT(52)\ SECURE_COPY_STRUCT(53)\ SECURE_COPY_STRUCT(54)\ SECURE_COPY_STRUCT(55)\ SECURE_COPY_STRUCT(56)\ SECURE_COPY_STRUCT(57)\ SECURE_COPY_STRUCT(58)\ SECURE_COPY_STRUCT(59)\ SECURE_COPY_STRUCT(60)\ SECURE_COPY_STRUCT(61)\ SECURE_COPY_STRUCT(62)\ SECURE_COPY_STRUCT(63)\ SECURE_COPY_STRUCT(64)\ }/*END switch*/ \ } \ else \ { \ char* pcDest = (char*)dest; \ char* pcSrc = (char*)src; \ switch(count) \ { /*lint -save -e616*/\ SECURE_COPY_BY_BYTE(64)\ SECURE_COPY_BY_BYTE(63)\ SECURE_COPY_BY_BYTE(62)\ SECURE_COPY_BY_BYTE(61)\ SECURE_COPY_BY_BYTE(60)\ SECURE_COPY_BY_BYTE(59)\ SECURE_COPY_BY_BYTE(58)\ SECURE_COPY_BY_BYTE(57)\ SECURE_COPY_BY_BYTE(56)\ SECURE_COPY_BY_BYTE(55)\ SECURE_COPY_BY_BYTE(54)\ SECURE_COPY_BY_BYTE(53)\ SECURE_COPY_BY_BYTE(52)\ SECURE_COPY_BY_BYTE(51)\ SECURE_COPY_BY_BYTE(50)\ SECURE_COPY_BY_BYTE(49)\ SECURE_COPY_BY_BYTE(48)\ SECURE_COPY_BY_BYTE(47)\ SECURE_COPY_BY_BYTE(46)\ SECURE_COPY_BY_BYTE(45)\ SECURE_COPY_BY_BYTE(44)\ SECURE_COPY_BY_BYTE(43)\ SECURE_COPY_BY_BYTE(42)\ SECURE_COPY_BY_BYTE(41)\ SECURE_COPY_BY_BYTE(40)\ SECURE_COPY_BY_BYTE(39)\ SECURE_COPY_BY_BYTE(38)\ SECURE_COPY_BY_BYTE(37)\ SECURE_COPY_BY_BYTE(36)\ SECURE_COPY_BY_BYTE(35)\ SECURE_COPY_BY_BYTE(34)\ SECURE_COPY_BY_BYTE(33)\ SECURE_COPY_BY_BYTE(32)\ SECURE_COPY_BY_BYTE(31)\ SECURE_COPY_BY_BYTE(30)\ SECURE_COPY_BY_BYTE(29)\ SECURE_COPY_BY_BYTE(28)\ SECURE_COPY_BY_BYTE(27)\ SECURE_COPY_BY_BYTE(26)\ SECURE_COPY_BY_BYTE(25)\ SECURE_COPY_BY_BYTE(24)\ SECURE_COPY_BY_BYTE(23)\ SECURE_COPY_BY_BYTE(22)\ SECURE_COPY_BY_BYTE(21)\ SECURE_COPY_BY_BYTE(20)\ SECURE_COPY_BY_BYTE(19)\ SECURE_COPY_BY_BYTE(18)\ SECURE_COPY_BY_BYTE(17)\ SECURE_COPY_BY_BYTE(16)\ SECURE_COPY_BY_BYTE(15)\ SECURE_COPY_BY_BYTE(14)\ SECURE_COPY_BY_BYTE(13)\ SECURE_COPY_BY_BYTE(12)\ SECURE_COPY_BY_BYTE(11)\ SECURE_COPY_BY_BYTE(10)\ SECURE_COPY_BY_BYTE(9)\ SECURE_COPY_BY_BYTE(8)\ SECURE_COPY_BY_BYTE(7)\ 
SECURE_COPY_BY_BYTE(6)\ SECURE_COPY_BY_BYTE(5)\ SECURE_COPY_BY_BYTE(4)\ SECURE_COPY_BY_BYTE(3)\ SECURE_COPY_BY_BYTE(2)\ SECURE_COPY_BY_BYTE(1)\ } /*lint -restore */\ } #define SECC_RET_MEMCPY_ECODE \ if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN ) \ { \ SECUREC_ERROR_INVALID_RANGE("memcpy_s"); \ return ERANGE; \ } \ if (dest == NULL || src == NULL) \ { \ SECUREC_ERROR_INVALID_PARAMTER("memcpy_s"); \ if (dest != NULL ) \ { \ (void)memset(dest, 0, destMax); \ return EINVAL_AND_RESET; \ } \ return EINVAL; \ } \ if (count > destMax) \ { \ (void)memset(dest, 0, destMax); \ SECUREC_ERROR_INVALID_RANGE("memcpy_s"); \ return ERANGE_AND_RESET; \ } \ if (dest == src) \ { \ return EOK; \ } \ if ((dest > src && dest < (void*)((UINT8T*)src + count)) || \ (src > dest && src < (void*)((UINT8T*)dest + count)) ) \ { \ (void)memset(dest, 0, destMax); \ SECUREC_ERROR_BUFFER_OVERLAP("memcpy_s"); \ return EOVERLAP_AND_RESET; \ } \ return EOK; errno_t memcpy_s(void* dest, size_t destMax, const void* src, size_t count) { #if defined(COMPATIBLE_WIN_FORMAT) /* fread API in windows will call memcpy_s and pass 0xffffffff to destMax. To avoid the failure of fread, we don't check desMax limit. */ if (LIKELY( count <= destMax && dest && src /*&& dest != src*/ && count > 0 && ( (dest > src && (void*)((UINT8T*)src + count) <= dest) || (src > dest && (void*)((UINT8T*)dest + count) <= src) ) ) ) #else if (LIKELY( count <= destMax && dest && src /*&& dest != src*/ && destMax <= SECUREC_MEM_MAX_LEN && count > 0 && ( (dest > src && (void*)((UINT8T*)src + count) <= dest) || (src > dest && (void*)((UINT8T*)dest + count) <= src) ) ) ) #endif { #if defined(SECUREC_MEMCOPY_WITH_PERFORMANCE) if (count > SECURE_MEMCOPY_THRESHOLD_SIZE) { #endif /*large enough, let system API do it*/ #ifdef USE_ASM memcpy_opt(dest, src, count); #else (void)memcpy(dest, src, count); #endif return EOK; #if defined(SECUREC_MEMCOPY_WITH_PERFORMANCE) } else { SMALL_MEM_COPY; return EOK; } #endif } else { /* meet some runtime violation, return error code */ SECC_RET_MEMCPY_ECODE } } #if defined(WITH_PERFORMANCE_ADDONS) errno_t memcpy_sOptAsm(void* dest, size_t destMax, const void* src, size_t count) { if (LIKELY( count <= destMax && dest && src /*&& dest != src*/ && destMax <= SECUREC_MEM_MAX_LEN && count > 0 && ( (dest > src && (void*)((UINT8T*)src + count) <= dest) || (src > dest && (void*)((UINT8T*)dest + count) <= src) ) ) ) { if (count > SECURE_MEMCOPY_THRESHOLD_SIZE) { /*large enough, let system API do it*/ #ifdef USE_ASM memcpy_opt(dest, src, count); #else (void)memcpy(dest, src, count); #endif return EOK; } else { SMALL_MEM_COPY; return EOK; } } else { /* meet some runtime violation, return error code */ SECC_RET_MEMCPY_ECODE } } /*trim judgement on "destMax <= SECUREC_MEM_MAX_LEN" */ errno_t memcpy_sOptTc(void* dest, size_t destMax, const void* src, size_t count) { if (LIKELY( count <= destMax && dest && src /*&& dest != src*/ && count > 0 && ( (dest > src && (void*)((UINT8T*)src + count) <= dest) || (src > dest && (void*)((UINT8T*)dest + count) <= src) ) ) ) { if (count > SECURE_MEMCOPY_THRESHOLD_SIZE) { /*large enough, let system API do it*/ #ifdef USE_ASM memcpy_opt(dest, src, count); #else (void)memcpy(dest, src, count); #endif return EOK; } else { SMALL_MEM_COPY; return EOK; } } else { /* meet some runtime violation, return error code */ SECC_RET_MEMCPY_ECODE } } #endif /* WITH_PERFORMANCE_ADDONS */ 
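/* Usage example added for illustration (a minimal sketch; it is not part of
 * the original source):
 *
 *     char dst[16];
 *     const char src[] = "secure copy";
 *     if (memcpy_s(dst, sizeof(dst), src, sizeof(src)) != EOK)
 *     {
 *         handle EINVAL / ERANGE / EOVERLAP_AND_RESET here; on such
 *         failures the implementation above zero-fills dst whenever the
 *         destination pointer and range allow it
 *     }
 *
 * For source and destination regions that may overlap, memmove_s (defined
 * in memmove_s.c below) must be used instead, because overlap makes the
 * behaviour of memcpy_s undefined. */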
UVP-Tools-2.2.0.316/uvp-monitor/securec/src/memmove_s.c000066400000000000000000000047441314037446600224660ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: memmove_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * memmove_s * * * errno_t memmove_s(void *dest, size_t destMax, const void *src, size_t count); * * * Copies count bytes of characters from src to dest. * * * dest Destination object. * destMax Size of the destination buffer. * src Source object. * count Number of characters to copy. * * * dest buffer is uptdated. * * * EOK Success * EINVAL dest == NULL or strSrc == NULL * ERANGE count > destMax or destMax > SECUREC_MEM_MAX_LEN * or destMax == 0 * * If an error occured, dest will NOT be filled with 0. * If some regions of the source area and the destination overlap, memmove_s * ensures that the original source bytes in the overlapping region are copied * before being overwritten. ******************************************************************************* */ errno_t memmove_s(void* dest, size_t destMax, const void* src, size_t count) { if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("memmove_s"); return ERANGE; } if (dest == NULL || src == NULL) { SECUREC_ERROR_INVALID_PARAMTER("memmove_s"); if (dest != NULL) { (void)memset(dest, 0, destMax); return EINVAL_AND_RESET; } return EINVAL; } if (count > destMax) { (void)memset(dest, 0, destMax); SECUREC_ERROR_INVALID_RANGE("memmove_s"); return ERANGE_AND_RESET; } if (dest == src) { return EOK; } if (count > 0) { #ifdef CALL_LIBC_COR_API /*use underlying memmove for performance consideration*/ (void)memmove(dest, src, count); #else util_memmove(dest, src, count); #endif } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/memset_s.c000066400000000000000000000225431314037446600223100ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: memset_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * memset_s * * * errno_t memset_s(void* dest, size_t destMax, int c, size_t count) * * * Sets buffers to a specified character. * * * dest Pointer to destination. * destMax The size of the buffer. * c Character to set. * count Number of characters. * * * dest buffer is uptdated. 
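 *
 * Example (a minimal usage sketch added for illustration; it is not part of
 * the original source):
 *
 *     char key[32];
 *     ... fill and use key ...
 *     if (memset_s(key, sizeof(key), 0, sizeof(key)) != EOK)
 *     {
 *         handle EINVAL or ERANGE here
 *     }
 *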
* * * EOK Success * EINVAL dest == NULL * ERANGE count > destMax or destMax > SECUREC_MEM_MAX_LEN * or destMax == 0 ******************************************************************************* */ /*assemble language memset*/ extern void* memset_opt(void* d, int c, size_t cnt); #if defined(WITH_PERFORMANCE_ADDONS) || defined(SECUREC_MEMSET_WITH_PERFORMANCE) static const MY_STR32 myStr = {{'\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0'}}; /*static variable always set zero*/ /*lint !e784 */ static const MY_STR32 myStrAllFF ={{'\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF','\xFF'}}; /*lint !e784 */ #ifndef SECURE_MEMSET_THRESHOLD_SIZE #define SECURE_MEMSET_THRESHOLD_SIZE (32UL) #endif #define SC_ADDR_ALIGNED(addr) !((size_t)addr & 7) #define COPY_STRUCT_4_ZERO(caseNum)\ case caseNum:*(MY_STR##caseNum *)dest=*(MY_STR##caseNum *)(&myStr);break; #define COPY_STRUCT_4_FF(caseNum)\ case caseNum:*(MY_STR##caseNum *)dest=*(MY_STR##caseNum *)(&myStrAllFF);break; #define SECURE_MEMSET_CONST(caseNum,addr,c) case caseNum: *addr++ = (char)c; #define SC_ALIGNED_COPY \ switch(c) { \ case 0: \ switch(count)\ { \ COPY_STRUCT_4_ZERO(1)\ COPY_STRUCT_4_ZERO(2)\ COPY_STRUCT_4_ZERO(3)\ COPY_STRUCT_4_ZERO(4)\ COPY_STRUCT_4_ZERO(5)\ COPY_STRUCT_4_ZERO(6)\ COPY_STRUCT_4_ZERO(7)\ COPY_STRUCT_4_ZERO(8)\ COPY_STRUCT_4_ZERO(9)\ COPY_STRUCT_4_ZERO(10)\ COPY_STRUCT_4_ZERO(11)\ COPY_STRUCT_4_ZERO(12)\ COPY_STRUCT_4_ZERO(13)\ COPY_STRUCT_4_ZERO(14)\ COPY_STRUCT_4_ZERO(15)\ COPY_STRUCT_4_ZERO(16)\ COPY_STRUCT_4_ZERO(17)\ COPY_STRUCT_4_ZERO(18)\ COPY_STRUCT_4_ZERO(19)\ COPY_STRUCT_4_ZERO(20)\ COPY_STRUCT_4_ZERO(21)\ COPY_STRUCT_4_ZERO(22)\ COPY_STRUCT_4_ZERO(23)\ COPY_STRUCT_4_ZERO(24)\ COPY_STRUCT_4_ZERO(25)\ COPY_STRUCT_4_ZERO(26)\ COPY_STRUCT_4_ZERO(27)\ COPY_STRUCT_4_ZERO(28)\ COPY_STRUCT_4_ZERO(29)\ COPY_STRUCT_4_ZERO(30)\ COPY_STRUCT_4_ZERO(31)\ COPY_STRUCT_4_ZERO(32)\ } \ return EOK; \ case 0xFF: \ switch(count)\ { \ COPY_STRUCT_4_FF(1)\ COPY_STRUCT_4_FF(2)\ COPY_STRUCT_4_FF(3)\ COPY_STRUCT_4_FF(4)\ COPY_STRUCT_4_FF(5)\ COPY_STRUCT_4_FF(6)\ COPY_STRUCT_4_FF(7)\ COPY_STRUCT_4_FF(8)\ COPY_STRUCT_4_FF(9)\ COPY_STRUCT_4_FF(10)\ COPY_STRUCT_4_FF(11)\ COPY_STRUCT_4_FF(12)\ COPY_STRUCT_4_FF(13)\ COPY_STRUCT_4_FF(14)\ COPY_STRUCT_4_FF(15)\ COPY_STRUCT_4_FF(16)\ COPY_STRUCT_4_FF(17)\ COPY_STRUCT_4_FF(18)\ COPY_STRUCT_4_FF(19)\ COPY_STRUCT_4_FF(20)\ COPY_STRUCT_4_FF(21)\ COPY_STRUCT_4_FF(22)\ COPY_STRUCT_4_FF(23)\ COPY_STRUCT_4_FF(24)\ COPY_STRUCT_4_FF(25)\ COPY_STRUCT_4_FF(26)\ COPY_STRUCT_4_FF(27)\ COPY_STRUCT_4_FF(28)\ COPY_STRUCT_4_FF(29)\ COPY_STRUCT_4_FF(30)\ COPY_STRUCT_4_FF(31)\ COPY_STRUCT_4_FF(32)\ } \ return EOK; \ }/*END switch*/ #define SC_ALIGNED_TAIL_COPY \ char* pcDest = (char*)dest; \ switch(count) \ { \ SECURE_MEMSET_CONST(32,pcDest,c)\ SECURE_MEMSET_CONST(31,pcDest,c)\ SECURE_MEMSET_CONST(30,pcDest,c)\ SECURE_MEMSET_CONST(29,pcDest,c)\ SECURE_MEMSET_CONST(28,pcDest,c)\ SECURE_MEMSET_CONST(27,pcDest,c)\ SECURE_MEMSET_CONST(26,pcDest,c)\ SECURE_MEMSET_CONST(25,pcDest,c)\ SECURE_MEMSET_CONST(24,pcDest,c)\ SECURE_MEMSET_CONST(23,pcDest,c)\ SECURE_MEMSET_CONST(22,pcDest,c)\ SECURE_MEMSET_CONST(21,pcDest,c)\ SECURE_MEMSET_CONST(20,pcDest,c)\ SECURE_MEMSET_CONST(19,pcDest,c)\ SECURE_MEMSET_CONST(18,pcDest,c)\ 
SECURE_MEMSET_CONST(17,pcDest,c)\ SECURE_MEMSET_CONST(16,pcDest,c)\ SECURE_MEMSET_CONST(15,pcDest,c)\ SECURE_MEMSET_CONST(14,pcDest,c)\ SECURE_MEMSET_CONST(13,pcDest,c)\ SECURE_MEMSET_CONST(12,pcDest,c)\ SECURE_MEMSET_CONST(11,pcDest,c)\ SECURE_MEMSET_CONST(10,pcDest,c)\ SECURE_MEMSET_CONST(9,pcDest,c)\ SECURE_MEMSET_CONST(8,pcDest,c)\ SECURE_MEMSET_CONST(7,pcDest,c)\ SECURE_MEMSET_CONST(6,pcDest,c)\ SECURE_MEMSET_CONST(5,pcDest,c)\ SECURE_MEMSET_CONST(4,pcDest,c)\ SECURE_MEMSET_CONST(3,pcDest,c)\ SECURE_MEMSET_CONST(2,pcDest,c)\ SECURE_MEMSET_CONST(1,pcDest,c)\ } #endif #define SECC_RET_MEMSET_ECODE \ if (destMax == 0 || destMax > SECUREC_MEM_MAX_LEN ) \ { \ SECUREC_ERROR_INVALID_RANGE("memset_s"); \ return ERANGE; \ } \ if (dest == NULL) \ { \ SECUREC_ERROR_INVALID_PARAMTER("memset_s"); \ return EINVAL; \ } \ if (count > destMax) \ { \ (void)memset(dest, c, destMax); /*set entire buffer to value c*/ \ SECUREC_ERROR_INVALID_RANGE("memset_s"); \ return ERANGE_AND_RESET; \ } \ return EOK; errno_t memset_s(void* dest, size_t destMax, int c, size_t count) { if (LIKELY(count <= destMax && dest && destMax <= SECUREC_MEM_MAX_LEN )) { #if defined(SECUREC_MEMSET_WITH_PERFORMANCE) if (count > SECURE_MEMSET_THRESHOLD_SIZE) { #endif #ifdef USE_ASM (void)memset_opt(dest, c, count); #else (void)memset(dest, c, count); #endif return EOK; #if defined(SECUREC_MEMSET_WITH_PERFORMANCE) } else { if (SC_ADDR_ALIGNED(dest) ) { /*use struct assignment*/ SC_ALIGNED_COPY } { SC_ALIGNED_TAIL_COPY /*lint !e616 */ } return EOK; } #endif } else { /* meet some runtime violation, return error code */ SECC_RET_MEMSET_ECODE } } #if defined(WITH_PERFORMANCE_ADDONS) errno_t memset_sOptAsm(void* dest, size_t destMax, int c, size_t count) { if (LIKELY(count <= destMax && dest && destMax <= SECUREC_MEM_MAX_LEN )) { if (count > SECURE_MEMSET_THRESHOLD_SIZE) { #ifdef USE_ASM (void)memset_opt(dest, c, count); #else (void)memset(dest, c, count); #endif return EOK; } else { if (SC_ADDR_ALIGNED(dest) ) { /*use struct assignment*/ SC_ALIGNED_COPY } { SC_ALIGNED_TAIL_COPY /*lint !e616 */ } return EOK; } } else { /* meet some runtime violation, return error code */ SECC_RET_MEMSET_ECODE } } errno_t memset_sOptTc(void* dest, size_t destMax, int c, size_t count) { if (LIKELY(count <= destMax && dest )) { if (count > SECURE_MEMSET_THRESHOLD_SIZE) { #ifdef USE_ASM (void)memset_opt(dest, c, count); #else (void)memset(dest, c, count); #endif return EOK; } else { if (SC_ADDR_ALIGNED(dest) ) { /*use struct assignment*/ SC_ALIGNED_COPY } { SC_ALIGNED_TAIL_COPY /*lint !e616 */ } return EOK; } } else { /* meet some runtime violation, return error code */ SECC_RET_MEMSET_ECODE } } #endif /* WITH_PERFORMANCE_ADDONS */ UVP-Tools-2.2.0.316/uvp-monitor/securec/src/output.inl000066400000000000000000001435221314037446600223750ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: output.inl * Description: * used by secureprintoutput_a.c and secureprintoutput_w.c to include. * This file provides a template function for ANSI and UNICODE compiling * by different type definition. The functions of securec_output_s or * securec_woutput_s provides internal implementation for printf family * API, such as sprintf, swprintf_s. * History: * 1. Date: 2014/10/31 * Author: LiShunda 254400 * Modification: use system sprintf API to format float number which can keep compatible * this float string output * 2. 
Date: 2014/11/21 * Author: LiShunda 254400 * Modification: improve performance on sprintf_s serial function, modify the algorithm * of data conversion from integer to string, and replace some function call * with macro. * see: http://www.cplusplus.com/reference/cstdio/printf/ ******************************************************************************** */ #ifndef OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 #define OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5 #define DISABLE_N_FORMAT #ifndef INT_MAX #define INT_MAX (0x7FFFFFFF) #endif #ifndef _CVTBUFSIZE #define _CVTBUFSIZE (309+40) /* # of digits in max float point value */ #endif #ifndef INT_MAX #define INT_MAX 2147483647 #endif #ifndef INT_MAX_DIV_TEN #define INT_MAX_DIV_TEN 21474836 #endif #ifndef MUL10 #define MUL10(x) ((((x) << 2) + (x)) << 1) #endif #define MUL10_ADD_BEYOND_MAX(val,ch) ((val > INT_MAX_DIV_TEN)) #ifdef STACK_SIZE_LESS_THAN_1K #define SECURE_FMT_STR_LEN (8) #else #define SECURE_FMT_STR_LEN (16) #endif #define MASK_PCLINT_NO_OVERFLOW /*lint !e586 */ static int indirectSprintf(char* strDest, const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vsprintf(strDest, format, arglist); /*lint !e668*/ va_end(arglist); return ret; } /* fast conversion for %d %i %u unsigned fastDivu10(unsigned n) { unsigned q, r; q = (n >> 1) + (n >> 2); q = q + (q >> 4); q = q + (q >> 8); q = q + (q >> 16); q = q >> 3; r = n - (((q << 2) + q) << 1); return q + (r > 9); } */ #define SAFE_WRITE_STR(src, txtLen, _stream, outChars) \ if (txtLen < 12 /* for mobile number length */) { \ for (ii = 0; ii < txtLen; ++ii) { \ *((TCHAR*)(_stream->_ptr)) = *(src); \ _stream->_ptr += sizeof(TCHAR); \ ++(src); \ } \ }else { \ memcpy(_stream->_ptr, src, txtLen * sizeof(TCHAR)); \ _stream->_ptr += txtLen * sizeof(TCHAR); \ } \ _stream->_cnt -= txtLen * sizeof(TCHAR); \ *(outChars) = *(outChars) + (txtLen); #define SECUREC_SPECIAL(_val, Base) \ case Base: \ do { \ *--text.sz = digits[_val % Base]; \ }while ((_val /= Base) /* != 0 */); \ break #define SAFE_WRITE_CHAR(_ch, _stream, outChars) \ *((TCHAR*)(_stream->_ptr)) = (TCHAR)_ch; \ _stream->_ptr += sizeof(TCHAR); \ _stream->_cnt -= sizeof(TCHAR); \ *(outChars) = *(outChars) + 1; #define SAFE_PADDING(padChar, padLen, _stream, outChars) \ for (ii = 0; ii < padLen; ++ii) { \ *((TCHAR*)(_stream->_ptr)) = (TCHAR)padChar; \ _stream->_ptr += sizeof(TCHAR); \ } \ _stream->_cnt -= padLen * sizeof(TCHAR); \ *(outChars) = *(outChars) + (padLen); #define IS_REST_BUF_ENOUGH(needLen) ((int)(stream->_cnt - (int)needLen * sizeof(TCHAR)) > 0) #ifdef _XXXUNICODE int securec_woutput_s #else int securec_output_s #endif ( SECUREC_XPRINTF_STREAM* stream,/*lint !e40 !e10 */ const TCHAR* format, va_list argptr ) {/*lint !e10 */ /* literal string to print null ptr, define it on stack rather than const text area is to avoid gcc warning with pointing const text with variable */ static char STR_NULL_STRING[] = "(null)"; /* change to next line wchar_t WSTR_NULL_STRING[] = L"(null)"; */ static wchar_t WSTR_NULL_STRING[] = {L'(', L'n', L'u', L'l', L'l', L')', L'\0'}; char* heapBuf = NULL; union { char* sz; /* pointer text to be printed, not zero terminated */ wchar_t* wz; } text; static const char _itoa_upper_digits[] = "0123456789ABCDEFX"; static const char _itoa_lower_digits[] = "0123456789abcdefx"; const char *digits = _itoa_upper_digits; int ii = 0; int hexAdd = 0; /* 0:lower x; 1: upper X */ int flags = 0; unsigned int radix; int charsOut; /* characters written*/ int fldWidth = 0; int 
precision = 0; int prefixLen = 0; int padding = 0; int textLen; /* length of the text*/ int bufferSize = 0; /* size of text.sz */ int dynWidth = 0, dynPrecision = 0; int noOutput = 0; int bufferIsWide = 0; /* flag for buffer contains wide chars */ FMT_STATE state,laststate; /* current state */ CHARTYPE chclass; /* current character class*/ wchar_t wchar = 0; TCHAR prefix[2]={0}; TCHAR ch; /* currently read character*/ union { char sz[BUFFERSIZE]; #ifdef _XXXUNICODE wchar_t wz[BUFFERSIZE]; #endif /* _XXXUNICODE */ } buffer; static const UINT8T securec__lookuptable_s[] = { /* ' ' */ 0x06, /* '!' */ 0x80, /* '"' */ 0x80, /* '#' */ 0x86, /* '$' */ 0x80, /* '%' */ 0x81, /* '&' */ 0x80, /* ''' */ 0x00, /* '(' */ 0x00, /* ')' */ 0x10, /* '*' */ 0x03, /* '+' */ 0x86, /* ',' */ 0x80, /* '-' */ 0x86, /* '.' */ 0x82, /* '/' */ 0x80, /* '0' */ 0x14, /* '1' */ 0x05, /* '2' */ 0x05, /* '3' */ 0x45, /* '4' */ 0x45, /* '5' */ 0x45, /* '6' */ 0x85, /* '7' */ 0x85, /* '8' */ 0x85, /* '9' */ 0x05, /* ':' */ 0x00, /* ';' */ 0x00, /* '<' */ 0x30, /* '=' */ 0x30, /* '>' */ 0x80, /* '?' */ 0x50, /* '@' */ 0x80, /* 'A' */ 0x80, /* 'B' */ 0x00, /* 'C' */ 0x08, /* 'D' */ 0x00, /* 'E' */ 0x28, /* 'F' */ 0x28, /* 'G' */ 0x38, /* 'H' */ 0x50, /* 'I' */ 0x57, /* 'J' */ 0x80, /* 'K' */ 0x00, /* 'L' */ 0x07, /* 'M' */ 0x00, /* 'N' */ 0x37, /* 'O' */ 0x30, /* 'P' */ 0x30, /* 'Q' */ 0x50, /* 'R' */ 0x50, /* 'S' */ 0x88, /* 'T' */ 0x00, /* 'U' */ 0x00, /* 'V' */ 0x00, /* 'W' */ 0x20, /* 'X' */ 0x28, /* 'Y' */ 0x20, /* 'Z' */ 0x87, /* '[' */ 0x80, /* '\' */ 0x80, /* ']' */ 0x00, /* '^' */ 0x00, /* '_' */ 0x00, /* '`' */ 0x60, /* 'a' */ 0x60, /* 'b' */ 0x60, /* 'c' */ 0x68, /* 'd' */ 0x68, /* 'e' */ 0x68, /* 'f' */ 0x08, /* 'g' */ 0x08, /* 'h' */ 0x07, /* 'i' */ 0x78, /* 'j' */ 0x77, /* 'k' */ 0x70, /* 'l' */ 0x77, /* 'm' */ 0x70, /* 'n' */ 0x70, /* 'o' */ 0x08, /* 'p' */ 0x08, /* 'q' */ 0x77, /* 'r' */ 0x00, /* 's' */ 0x08, /* 't' */ 0x77, /* 'u' */ 0x08, /* 'v' */ 0x00, /* 'w' */ 0x07, /* 'x' */ 0x08, /* 'y' */ 0x00, /* 'z' */ 0x57 }; charsOut = 0; textLen = 0; laststate = state = STAT_NORMAL; /* starting state */ text.sz = NULL; /*loop each format character */ /* remove format != NULL */ while ((ch = *format++) != _T('\0') && charsOut >= 0) { laststate = state; chclass = FIND_CHAR_CLASS(securec__lookuptable_s, ch); state = FIND_NEXT_STATE(securec__lookuptable_s, chclass, laststate); switch (state) { case STAT_INVALID: return -1; /*input format is wrong, directly return*/ case STAT_NORMAL: NORMAL_STATE: /* normal state, write character */ #ifdef _XXXUNICODE bufferIsWide = 1; #else /* _XXXUNICODE */ bufferIsWide = 0; #endif /* _XXXUNICODE */ if (IS_REST_BUF_ENOUGH(1 /* only one char */)) { SAFE_WRITE_CHAR(ch, stream, &charsOut); }else { write_char(ch, stream, &charsOut); } continue; case STAT_PERCENT: /* set default values */ prefixLen = fldWidth = 0; noOutput = 0; flags = 0; precision = -1; bufferIsWide = 0; break; case STAT_FLAG: /* set flag based on which flag character */ switch (ch) { case _T('-'): flags |= FLAG_LEFT; break; case _T('+'): flags |= FLAG_SIGN; /* force sign indicator */ break; case _T(' '): flags |= FLAG_SIGN_SPACE; break; case _T('#'): flags |= FLAG_ALTERNATE; break; case _T('0'): flags |= FLAG_LEADZERO; /* pad with leading zeros */ break; default: break; } break; case STAT_WIDTH: /* update width value */ if (ch == _T('*')) { /* get width*/ fldWidth = va_arg(argptr, int); /*lint !e826 !e10 !e718 !e746*/ if (fldWidth < 0) { flags |= FLAG_LEFT; fldWidth = -fldWidth; } dynWidth = 1; } else { if(laststate != 
STAT_WIDTH) { fldWidth = 0; } if(MUL10_ADD_BEYOND_MAX(fldWidth,ch)) { return -1; } fldWidth = MUL10(fldWidth) + (ch - _T('0')); dynWidth = 0; } break; case STAT_DOT: precision = 0; break; case STAT_PRECIS: /* update precison value */ if (ch == _T('*')) { /* get precision from arg list */ precision = va_arg(argptr, int); /*lint !e826 !e10 */ if (precision < 0) { precision = -1; } dynPrecision = 1; } else { /* add digit to current precision */ if(MUL10_ADD_BEYOND_MAX(precision,ch)) { return -1; } precision = MUL10(precision) + (ch - _T('0')); dynPrecision = 0; } break; case STAT_SIZE: /* read a size specifier, set the flags based on it */ switch (ch) { #ifdef COMPATIBLE_LINUX_FORMAT case _T('j'): flags |= FLAG_INTMAX; break; #endif case _T('q'): case _T('L'): flags |= FLAG_LONGLONG | FLAG_LONG_DOUBLE; break; case _T('l'): if (*format == _T('l')) { ++format; flags |= FLAG_LONGLONG; /* long long */ } else { flags |= FLAG_LONG; /* long int or wchar_t */ } break; case _T('t') : flags |= FLAG_PTRDIFF; break; #ifdef COMPATIBLE_LINUX_FORMAT case _T('z') : flags |= FLAG_SIZE; break; #endif /*lint -fallthrough */ case _T('I'): #ifdef SECUREC_ON_64BITS flags |= FLAG_I64; /* 'I' => INT64T on 64bits systems */ #endif if ( (*format == _T('6')) && (*(format + 1) == _T('4')) ) { format += 2; flags |= FLAG_I64; /* I64 => INT64T */ } else if ( (*format == _T('3')) && (*(format + 1) == _T('2')) ) { format += 2; flags &= ~FLAG_I64; /* I32 => __int32 */ } else if ( (*format == _T('d')) || (*format == _T('i')) || (*format == _T('o')) || (*format == _T('u')) || (*format == _T('x')) || (*format == _T('X')) ) { /* no further doing. */ } else { state = STAT_NORMAL; goto NORMAL_STATE; /*lint !e801*/ } break; case _T('h'): if (*format == _T('h')) flags |= FLAG_CHAR; /* char */ else flags |= FLAG_SHORT; /* short int */ break; case _T('w'): flags |= FLAG_WIDECHAR; /* wide char */ break; default: break; } break; case STAT_TYPE: switch (ch) { case _T('C'): /* wide char */ if (!(flags & (FLAG_SHORT | FLAG_LONG | FLAG_WIDECHAR))) { #ifdef _XXXUNICODE flags |= FLAG_SHORT; #else /* _XXXUNICODE */ flags |= FLAG_WIDECHAR; #endif /* _XXXUNICODE */ } /* fall into 'c' case */ /*lint -fallthrough */ case _T('c'): { #if (defined(COMPATIBLE_LINUX_FORMAT)) && !(defined(__hpux)) && !(defined(__SOLARIS)) flags &= ~FLAG_LEADZERO; #endif /* print a single character specified by int argument */ #ifdef _XXXUNICODE bufferIsWide = 1; wchar = (wchar_t)va_arg(argptr, int); /*lint !e826 !e10*/ if (flags & FLAG_SHORT) { /* format multibyte character */ char tempchar[2]; { tempchar[0] = (char)(wchar & 0x00ff); tempchar[1] = '\0'; } if (mbtowc(buffer.wz, tempchar, (size_t)MB_CUR_MAX) < 0) { /* ignore if conversion was unsuccessful */ noOutput = 1; } } else { buffer.wz[0] = wchar; } text.wz = buffer.wz; textLen = 1; /* print just a single character */ #else /* _XXXUNICODE */ if (flags & (FLAG_LONG | FLAG_WIDECHAR)) { wchar = (wchar_t)va_arg(argptr, int); /*lint !e826 !e10 */ /* convert to multibyte character */ textLen = wctomb(buffer.sz, wchar); if (textLen < 0) { noOutput = 1; } } else { /* format multibyte character */ unsigned short temp; temp = (unsigned short) va_arg(argptr, int); /*lint !e826 !e10 */ { buffer.sz[0] = (char) temp; textLen = 1; } } text.sz = buffer.sz; #endif /* _XXXUNICODE */ } break; case _T('S'): /* wide char string */ #ifndef _XXXUNICODE if (!(flags & (FLAG_SHORT | FLAG_LONG | FLAG_WIDECHAR))) { flags |= FLAG_WIDECHAR; } #else /* _XXXUNICODE */ if (!(flags & (FLAG_SHORT | FLAG_LONG | FLAG_WIDECHAR))) { flags |= FLAG_SHORT; 
} #endif /* _XXXUNICODE */ /*lint -fallthrough */ case _T('s'): { /* a string */ int i; char* p; /* temps */ wchar_t* pwch; #if (defined(COMPATIBLE_LINUX_FORMAT)) && (!defined(__UNIX)) flags &= ~FLAG_LEADZERO; #endif i = (precision == -1) ? INT_MAX : precision; text.sz = va_arg(argptr, char*); /*lint !e826 !e64 !e10 */ /* scan for null upto i characters */ #ifdef _XXXUNICODE #if defined(COMPATIBLE_LINUX_FORMAT) if (!(flags & FLAG_LONG)) { flags |= FLAG_SHORT; } #endif if (flags & FLAG_SHORT) { if (text.sz == NULL) /* NULL passed, use special string */ { text.sz = STR_NULL_STRING; } p = text.sz; for (textLen = 0; textLen < i && *p; textLen++) { ++p; } /* textLen now contains length in multibyte chars */ } else { if (text.wz == NULL) /* NULL passed, use special string */ { text.wz = WSTR_NULL_STRING; } bufferIsWide = 1; pwch = text.wz; while (i-- && *pwch) { ++pwch; } textLen = (int)(pwch - text.wz); /*lint !e946*/ /* in wchar_ts */ /* textLen now contains length in wide chars */ } #else /* _XXXUNICODE */ if (flags & (FLAG_LONG | FLAG_WIDECHAR)) { if (text.wz == NULL) /* NULL passed, use special string */ { text.wz = WSTR_NULL_STRING; } bufferIsWide = 1; pwch = text.wz; while ( i-- && *pwch ) { ++pwch; } textLen = (int)(pwch - text.wz); /*lint !e946*/ } else { if (text.sz == NULL) /* meet NULL, use special string */ { text.sz = STR_NULL_STRING; } p = text.sz; if (INT_MAX == i) { /* precision NOT assigned */ while (*p) { ++p; } }else { /* precision assigned */ while (i-- && *p) { ++p; } } textLen = (int)(p - text.sz); /*lint !e946*/ /* length of the string */ } #endif /* _XXXUNICODE */ } break; case _T('n'): { /* write count of characters*/ /* if %n is disabled, we skip an arg and print 'n' */ #ifdef DISABLE_N_FORMAT return -1; #else void* p; /* temp */ p = va_arg(argptr, void*); /* LSD util_get_ptr_arg(&argptr);*/ /* store chars out into short/long/int depending on flags */ #ifdef SECUREC_ON_64BITS if (flags & FLAG_LONG) { *(long*)p = charsOut; } else #endif /* SECUREC_ON_64BITS */ if (flags & FLAG_SHORT) { *(short*)p = (short) charsOut; } else { *(int*)p = charsOut; } noOutput = 1; /* force no output */ break; #endif } case _T('E'): case _T('F'): case _T('G'): case _T('A'): ch += _T('a') - _T('A'); /* convert format char to lower */ /*lint -fallthrough */ case _T('e'): case _T('f'): case _T('g'): case _T('a'): { /* floating point conversion */ text.sz = buffer.sz; /* output buffer for float string with default size*/ bufferSize = BUFFERSIZE; /* compute the precision value */ if (precision < 0) { precision = 6; } else if (precision == 0 && ch == _T('g')) { precision = 1; } /* calc buffer size to store long double value */ if (flags & FLAG_LONG_DOUBLE) { if( precision > (INT_MAX - CVTBUFSIZE_LB) ) { noOutput = 1; break; } bufferSize = CVTBUFSIZE_LB + precision ; } else { if( precision > (INT_MAX - _CVTBUFSIZE)) { noOutput = 1; break; } bufferSize = _CVTBUFSIZE + precision ; } if(fldWidth > bufferSize) { bufferSize = fldWidth ; } if(bufferSize >= BUFFERSIZE) { /* the current vlaue of BUFFERSIZE could NOT store the formatted float string */ heapBuf = (char*)SECC_MALLOC(((size_t)bufferSize + (size_t)2));/*lint !e586 *//* size include '+' and '\0'*/ if (heapBuf != NULL) { text.sz = heapBuf; } else { noOutput = 1; break; } } { /* add following code to call system sprintf API for float number */ const TCHAR* pFltFmt = format - 2; /* point to the position before 'f' or 'g' */ int k = 0, fltFmtStrLen; TCHAR fltFmtStr[SECURE_FMT_STR_LEN]; TCHAR* pFltFmtStr = fltFmtStr; while(_T('%') != *pFltFmt ) { /* 
must meet '%' */ --pFltFmt; } fltFmtStrLen = format - pFltFmt + 1; /* with ending terminator */ if (fltFmtStrLen > SECURE_FMT_STR_LEN) { /* if SECURE_FMT_STR_LEN is NOT enough, alloc a new buffer */ pFltFmtStr = (TCHAR*)SECC_MALLOC(fltFmtStrLen * sizeof(TCHAR)); if (NULL == pFltFmtStr) { noOutput= 1; break; } } for (;k < fltFmtStrLen -1; ++k ){ pFltFmtStr[k] = *(pFltFmt + k); /* copy the format string */ } pFltFmtStr[k] = _T('\0'); #ifdef _XXXUNICODE for (k = 0; k < fltFmtStrLen - 1; ++k ){ /* convert wchar to char */ *((char*)pFltFmtStr + k) = (char)*(pFltFmt + k); } *((char*)pFltFmtStr + k) = '\0'; #endif #ifdef COMPATIBLE_LINUX_FORMAT if (flags & FLAG_LONG_DOUBLE) { long double tmp; tmp = va_arg(argptr, long double); /*lint !e826 !e10 */ /* call system sprintf to format float value */ if (dynWidth && dynPrecision) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, fldWidth, precision, tmp); MASK_PCLINT_NO_OVERFLOW }else if (dynWidth) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, fldWidth,tmp); MASK_PCLINT_NO_OVERFLOW }else if (dynPrecision) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, precision, tmp); MASK_PCLINT_NO_OVERFLOW }else{ textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, tmp); MASK_PCLINT_NO_OVERFLOW } } else #endif { double tmp; tmp = va_arg(argptr, double); /*lint !e826 !e10 */ if (dynWidth && dynPrecision) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, fldWidth, precision,tmp); MASK_PCLINT_NO_OVERFLOW }else if (dynWidth) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, fldWidth,tmp); MASK_PCLINT_NO_OVERFLOW }else if (dynPrecision) { textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, precision, tmp); MASK_PCLINT_NO_OVERFLOW }else{ textLen = indirectSprintf(text.sz, (char*)pFltFmtStr, tmp); MASK_PCLINT_NO_OVERFLOW } } if (fltFmtStrLen > SECURE_FMT_STR_LEN) { /* if buffer is alloced on heap, free it */ SECC_FREE(pFltFmtStr); /*lint !e673*/ } if (textLen > bufferSize ) { /* bufferSize is large enough, this should NOT occur */ noOutput = 1; break; } padding = 0; /* no padding*/ flags = 0; /* clear all internal flags */ goto FILL_STRING_BUFFER; /*lint !e801*/ } } case _T('d'): case _T('i'): /* signed decimal output */ flags |= FLAG_SIGNED; radix = 10; goto COMMON_INT; /*lint !e801*/ case _T('u'): radix = 10; goto COMMON_INT; /*lint !e801*/ case _T('p'): /* print a pointer*/ #if defined(COMPATIBLE_WIN_FORMAT) flags &= ~FLAG_LEADZERO; #else flags |= FLAG_POINTER; #endif #if (defined(COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) && (!defined(__UNIX)) #if defined(SECUREC_VXWORKS_PLATFORM) precision = 1; #else precision = 0; #endif flags |= FLAG_ALTERNATE; /* "0x" is not default prefix in UNIX */ hexAdd = 0; #ifdef SECUREC_ON_64BITS flags |= FLAG_I64; /* converting an int64 */ #else flags |= FLAG_LONG; /* converting a long */ #endif goto COMMON_HEX; /*lint !e801 */ #else #if defined(_AIX) || defined(__SOLARIS) precision = 1; #else precision = 2 * sizeof(void*); #endif #ifdef SECUREC_ON_64BITS flags |= FLAG_I64; /* converting an int64 */ #else flags |= FLAG_LONG; /* converting a long */ #endif #endif #if defined(__UNIX) hexAdd = 0; goto COMMON_HEX; /*lint !e801*/ #endif /*lint -fallthrough */ case _T('X'): /* unsigned upper hex output */ hexAdd = 1; goto COMMON_HEX; /*lint !e801*/ case _T('x'): /* unsigned lower hex output */ hexAdd = 0; /* set hexAdd for lowercase hex */ /* DROP THROUGH TO COMMON_HEX */ COMMON_HEX: radix = 16; digits = hexAdd == 0 ? 
_itoa_lower_digits: _itoa_upper_digits ; if (flags & FLAG_ALTERNATE) { /* alternate form means '0x' prefix */ prefix[0] = _T('0'); prefix[1] = (TCHAR)(digits[16]); /* 'x' or 'X' */ /*lint !e571*/ #if (defined(COMPATIBLE_LINUX_FORMAT) || defined(SECUREC_VXWORKS_PLATFORM)) if ('p' == ch) { prefix[1] = _T('x'); } #endif #if defined(_AIX) || defined(__SOLARIS) if('p' == ch) { prefixLen = 0; } else { prefixLen = 2; } #else prefixLen = 2; #endif } goto COMMON_INT; /*lint !e801*/ case _T('o'): /* unsigned octal output */ radix = 8; if (flags & FLAG_ALTERNATE) { /* alternate form means force a leading 0 */ flags |= FLAG_FORCE_OCTAL; } /* DROP THROUGH to COMMON_INT */ COMMON_INT: { UINT64T number = 0; /* number to convert */ #if defined(SECUREC_VXWORKS_VERSION_5_4) UINT32T digit = 0; /* ascii value of digit */ #endif INT64T l; /* temp long value */ unsigned char tch; #if defined(SECUREC_VXWORKS_VERSION_5_4) UINT32T puiQuotientHigh = 0; UINT32T puiQuotientLow = 0; #endif /* read argument into variable l*/ if (flags & FLAG_I64) { l = va_arg(argptr, INT64T); /*lint !e826 !e530 !e516 !e78 */ } else if (flags & FLAG_LONGLONG) { l = va_arg(argptr, INT64T); /*lint !e826 !e516 !e78 */ } else #ifdef SECUREC_ON_64BITS if (flags & FLAG_LONG) { l = va_arg(argptr, long); } else #endif /* SECUREC_ON_64BITS */ if (flags & FLAG_CHAR) { if (flags & FLAG_SIGNED) { l = (char) va_arg(argptr, int); /* sign extend */ /*lint !e826 !e10 */ if (l >= 128) /* on some platform, char is always unsigned */ { flags |= FLAG_NEGATIVE; tch = (char)~l; l = tch + 1; } } else { l = (unsigned char) va_arg(argptr, int); /* zero-extend*/ /*lint !e826 !e10 */ } } else if (flags & FLAG_SHORT) { if (flags & FLAG_SIGNED) { l = (short) va_arg(argptr, int); /* sign extend */ /*lint !e826 !e10 */ } else { l = (unsigned short) va_arg(argptr, int); /* zero-extend*/ /*lint !e826 !e10 */ } } #ifdef COMPATIBLE_LINUX_FORMAT else if (flags & FLAG_PTRDIFF) { l = (ptrdiff_t) va_arg(argptr, ptrdiff_t); /* sign extend */ /*lint !e826 !e10 !e78 !e530 */ } else if (flags & FLAG_SIZE) { if (flags & FLAG_SIGNED) { if (sizeof(size_t) == sizeof(long)) /*lint !e506*/ { l = va_arg(argptr, long); /* sign extend */ /*lint !e826 !e10 */ } else if (sizeof(size_t) == sizeof(long long)) /*lint !e506*/ { l = va_arg(argptr, long long); /* sign extend */ /*lint !e826 !e10 */ } else { l = va_arg(argptr, int); /* sign extend */ /*lint !e826 !e10 */ } } else { l = (size_t) va_arg(argptr, size_t); /* sign extend */ /*lint !e826 !e10 !e78 !e516 !e530 */ } } else if (flags & FLAG_INTMAX) { if (flags & FLAG_SIGNED) { l = va_arg(argptr, INT64T); /* sign extend */ /*lint !e826 !e10 !e78 !e516 !e530 */ } else { l = (UINT64T)va_arg(argptr, UINT64T); /* sign extend */ /*lint !e826 !e10 !e78 !e516 !e571 */ } } #endif else { if (flags & FLAG_SIGNED) { l = va_arg(argptr, int); /*lint !e826 !e10 */ } else { l = (unsigned int) va_arg(argptr, int); /* zero-extend*/ /*lint !e826 !e10 */ } } /* check for negative; copy into number */ if ( (flags & FLAG_SIGNED) && l < 0)/*lint !e539 */ { number = -l; /*lint !e732*/ flags |= FLAG_NEGATIVE; } else { number = l; /*lint !e732*/ } #ifdef COMPATIBLE_LINUX_FORMAT if ((flags & FLAG_INTMAX) == 0) #endif { if ( ((flags & FLAG_I64) == 0) && ((flags & FLAG_LONGLONG) == 0) ) { #ifdef SECUREC_ON_64BITS #if defined(COMPATIBLE_WIN_FORMAT) /*on window 64 system sizeof long is 32bit */ if (((flags & FLAG_PTRDIFF) == 0) && ((flags & FLAG_SIZE) == 0)) #else if (((flags & FLAG_LONG) == 0) && ((flags & FLAG_PTRDIFF) == 0) && ((flags & FLAG_SIZE) == 0)) #endif 
#endif number &= 0xffffffff; } } /* check precision value for default */ if (precision < 0) { precision = 1; /* default precision */ } else { #if defined(COMPATIBLE_WIN_FORMAT) flags &= ~FLAG_LEADZERO; #else if(!(flags & FLAG_POINTER)){ flags &= ~FLAG_LEADZERO; } #endif if (precision > MAXPRECISION) { precision = MAXPRECISION; } } /* Check if data is 0; if so, turn off hex prefix,if 'p',add 0x prefix,else not add prefix */ if (number == 0) { #if !(defined(SECUREC_VXWORKS_PLATFORM)||defined(__hpux)) prefixLen = 0; #else if(('p' == ch) && (flags & FLAG_ALTERNATE)) prefixLen = 2; else prefixLen = 0; #endif } /* Convert data to ASCII */ text.sz = &buffer.sz[BUFFERSIZE - 1]; if (number > 0) { #ifdef SECUREC_ON_64BITS switch (radix) { /* the compiler will optimize each one */ SECUREC_SPECIAL (number, 10); SECUREC_SPECIAL (number, 16); SECUREC_SPECIAL (number, 8); } #else /* for 32 bits system */ if (number <= 0xFFFFFFFFUL ) { /* in most case, the value to be converted is small value */ UINT32T n32Tmp = (UINT32T)number; switch (radix){ SECUREC_SPECIAL (n32Tmp, 16); SECUREC_SPECIAL (n32Tmp, 8); #ifdef _AIX /* the compiler will optimize div 10 */ SECUREC_SPECIAL (n32Tmp, 10); #else case 10: { /* fast div 10 */ UINT32T q, r; do{ *--text.sz = digits[n32Tmp % 10]; q = (n32Tmp >> 1) + (n32Tmp >> 2); q = q + (q >> 4); q = q + (q >> 8); q = q + (q >> 16); q = q >> 3; r = n32Tmp - (((q << 2) + q) << 1); n32Tmp = (r > 9) ? (q+1):q; }while ( n32Tmp /* != 0 */); } #endif } /* end switch */ }else{ /* the value to be converted is greater than 4G */ #if defined(SECUREC_VXWORKS_VERSION_5_4) do{ if (0 != U64Div32((UINT32T)((number >> 16)>>16), (UINT32T)number, radix, &puiQuotientHigh, &puiQuotientLow, &digit)) { noOutput = 1; break; } *--text.sz = digits[digit]; number = (UINT64T)puiQuotientHigh; number = (number << 32) + puiQuotientLow; }while (number /* != 0 */); #else switch (radix) { /* the compiler will optimize div 10 */ SECUREC_SPECIAL (number, 10); SECUREC_SPECIAL (number, 16); SECUREC_SPECIAL (number, 8); } #endif } #endif } /* END if (number > 0)*/ textLen = (int)((char*)&buffer.sz[BUFFERSIZE - 1] - text.sz); /*lint !e946*/ /* compute length of number */ if (precision > textLen ){ for (ii = 0; ii < precision - textLen; ++ii) { *--text.sz = '0'; } textLen = precision; } /* Force a leading zero if FORCEOCTAL flag set */ if ((flags & FLAG_FORCE_OCTAL) && (textLen == 0 || text.sz[0] != '0')) { *--text.sz = '0'; ++textLen; /* add a zero */ } } break; default: break; } if (!noOutput) { if (flags & FLAG_SIGNED) { if (flags & FLAG_NEGATIVE) { /* prefix is a '-' */ prefix[0] = _T('-'); prefixLen = 1; } else if (flags & FLAG_SIGN) { /* prefix is '+' */ prefix[0] = _T('+'); prefixLen = 1; } else if (flags & FLAG_SIGN_SPACE) { /* prefix is ' ' */ prefix[0] = _T(' '); prefixLen = 1; } } #if defined(COMPATIBLE_LINUX_FORMAT) && (!defined(__UNIX)) if((flags & FLAG_POINTER) && (0 == textLen)) { flags &= ~FLAG_LEADZERO; text.sz = &buffer.sz[BUFFERSIZE - 1]; *text.sz-- = '\0'; *text.sz-- = ')'; *text.sz-- = 'l'; *text.sz-- = 'i'; *text.sz-- = 'n'; *text.sz = '('; textLen = 5; } #endif /* calculate amount of padding */ padding = (fldWidth - textLen) - prefixLen; /* put out the padding, prefix, and text, in the correct order */ if (!(flags & (FLAG_LEFT | FLAG_LEADZERO)) && padding > 0) { /* pad on left with blanks */ if (IS_REST_BUF_ENOUGH(padding)) { SAFE_PADDING(_T(' '), padding, stream, &charsOut); }else { write_multi_char(_T(' '), padding, stream, &charsOut); } } /* write prefix */ if (prefixLen > 0 ) { TCHAR* pPrefix = 
prefix; if (IS_REST_BUF_ENOUGH(prefixLen)) { SAFE_WRITE_STR(pPrefix, prefixLen, stream, &charsOut); /*lint !e670 !e534*/ }else { write_string(prefix, prefixLen, stream, &charsOut); } } if ((flags & FLAG_LEADZERO) && !(flags & FLAG_LEFT) && padding > 0) { /* write leading zeros */ if (IS_REST_BUF_ENOUGH(padding)) { SAFE_PADDING(_T('0'), padding, stream, &charsOut); }else { write_multi_char(_T('0'), padding, stream, &charsOut); } } FILL_STRING_BUFFER: /* write text */ #ifndef _XXXUNICODE if (bufferIsWide && (textLen > 0)) { wchar_t* p; int retval; int count; /* int e = 0;*/ char L_buffer[MB_LEN_MAX + 1]; p = text.wz; count = textLen; while (count--) { retval = wctomb(L_buffer, *p++); if (retval <= 0) { charsOut = -1; break; } write_string(L_buffer, retval, stream, &charsOut); } } else { /* coverity[overrun-local] */ if (IS_REST_BUF_ENOUGH(textLen)) { SAFE_WRITE_STR(text.sz, textLen, stream, &charsOut); /*lint !e534*/ }else { write_string(text.sz, textLen, stream, &charsOut); } } #else /* _XXXUNICODE */ if (!bufferIsWide && textLen > 0) { char* p; int retval, count; p = text.sz; count = textLen; while (count > 0) { /* coverity[overrun-buffer-val] */ retval = mbtowc(&wchar, p, (size_t)MB_CUR_MAX); if (retval <= 0) { charsOut = -1; break; } write_char(wchar, stream, &charsOut); p += retval; count-=retval; } } else { if (IS_REST_BUF_ENOUGH(textLen)) { SAFE_WRITE_STR(text.wz, textLen, stream, &charsOut); /*lint !e534*/ }else { write_string(text.wz, textLen, stream, &charsOut); } } #endif /* _XXXUNICODE */ if (charsOut >= 0 && (flags & FLAG_LEFT) && padding > 0) { /* pad on right with blanks */ if (IS_REST_BUF_ENOUGH(padding)) { SAFE_PADDING(_T(' '), padding, stream, &charsOut); }else { write_multi_char(_T(' '), padding, stream, &charsOut); } } /* we're done! */ } if (heapBuf != NULL) { SECC_FREE(heapBuf);/*lint !e586 !e516*/ heapBuf = NULL; } break; } } if (state != STAT_NORMAL && state != STAT_TYPE) { return -1; } return charsOut; /* the number of characters written */ } #endif /*OUTPUT_INL_2B263E9C_43D8_44BB_B17A_6D2033DECEE5*/ UVP-Tools-2.2.0.316/uvp-monitor/securec/src/scanf_s.c000066400000000000000000000037351314037446600221120ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: scanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * scanf_s * * * int scanf_s (const char* format, ...); * * * The scanf_s function reads data from the standard input stream stdin and * writes the data into the location that's given by argument. Each argument * must be a pointer to a variable of a type that corresponds to a type specifier * in format. If copying occurs between strings that overlap, the behavior is * undefined. * * * format Format control string. * ... Optional arguments. * * * ... The converted value stored in user assigned address * * * Returns the number of fields successfully converted and assigned; * the return value does not include fields that were read but not assigned. * A return value of 0 indicates that no fields were assigned. * The return value is EOF for an error, or if the end-of-file character * or the end-of-string character is encountered in the first attempt to read * a character. 
* If format is a NULL pointer, the invalid parameter handler is invoked, If * execution is allowed to continue, scanf_s and wscanf_s return SCANF_EINVAL. ******************************************************************************* */ int scanf_s (const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vscanf_s( format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secinput.h000066400000000000000000000053631314037446600223340ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secinput.h * Decription: * define macro, data struct, and declare function prototype, * which is used by input.inl, secureinput_a.c and secureinput_w.c. Note: * The macro SECUREC_LOCK_FIL, SECUREC_UNLOCK_FILE, SECUREC_LOCK_STDIN * and SECUREC_UNLOCK_STDIN are NOT implemented, which depends on your dest * system. User can add implementation to these macro. * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #ifndef __SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C #define __SEC_INPUT_H_E950DA2C_902F_4B15_BECD_948E99090D9C #include #include #define SCANF_EINVAL (-1) #define NOT_USED_FILE_PTR NULL /* for internal stream flag */ #define MEM_STR_FLAG 0X01 #define FILE_STREAM_FLAG 0X02 #define FROM_STDIN_FLAG 0X04 #define LOAD_FILE_TO_MEM_FLAG 0X08 #define UNINITIALIZED_FILE_POS (-1) #define BOM_HEADER_SIZE (2) #define UTF8_BOM_HEADER_SIZE (3) typedef struct { int _cnt; /* the size of buffered string in bytes*/ char* _ptr; /* the pointer to next read position */ char* _base; /*the pointer to the header of buffered string*/ int _flag; /* mark the properties of input stream*/ FILE* pf; /*the file pointer*/ int fileRealRead; long oriFilePos; /*the original position of file offset when fscanf is called*/ unsigned int lastChar; /*the char code of last input*/ int fUnget; /*the boolean flag of pushing a char back to read stream*/ } SEC_FILE_STREAM; #define INIT_SEC_FILE_STREAM {0, NULL, NULL, 0, NULL, 0, 0, 0, 0 } //lint -esym(526, securec_input_s*) int securec_input_s (SEC_FILE_STREAM* stream, const char* format, va_list arglist); /*from const UINT8T* to const char**/ //lint -esym(526, securec_winput_s*) int securec_winput_s (SEC_FILE_STREAM* stream, const wchar_t* format, va_list arglist); //lint -esym(526, clearDestBuf*) void clearDestBuf(const char* buffer,const char* cformat, va_list arglist); //lint -esym(526, clearwDestBuf*) void clearwDestBuf (const wchar_t* buffer,const wchar_t* cformat, va_list arglist); #define SECUREC_LOCK_FILE(s) #define SECUREC_UNLOCK_FILE(s) #define SECUREC_LOCK_STDIN(i,s) #define SECUREC_UNLOCK_STDIN(i,s) #define _VALIDATE_STREAM_ANSI_SETRET( stream, errorcode, retval, retexpr) #endif UVP-Tools-2.2.0.316/uvp-monitor/securec/src/securecutil.c000066400000000000000000000352151314037446600230230ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: securecutil.c * Decription: * provides internal functions used by this library, such as memory * copy and memory move. Besides, include some helper function for * printf family API, such as vsnprintf_helper, SECUREC_PUTC_NOLOCK, * __putwc_nolock. 
Also includes some folat converting function, such * as cvt, ecvtbuf, fcvtbuf, cfltcvt. * History: * 1. Date: 2014/4/10 * Author: LiShunda * Modification: move vswprintf_helper() function from this file to vswprintf.c, * which make this file only contain ANSI string function. This will * facilitate the ANSI string API building. * 2. Date: 2014/4/10 * Author: LiShunda * Modification: add int putWcharStrEndingZero(SECUREC_XPRINTF_STREAM* str, int zerosNum) * function. In vswprintf.c, the original code use (SECUREC_PUTC_NOLOCK('\0', str) != EOF ) * four times and a do-while wrapper which is NOT easy to read and understand, * so abstract this function. * 3. Date: 2014/4/10 * Author: LiShunda * Modification: change a variabel name "exp" in function "cfltcvt" to "expVal", for "exp" is a function name in , * which make pclint warning. * 4. Date: 2014/4/10 * Author: LiShunda * Modification: remove 'char* s__nullstring = "(null)"' and 'wchar_t* s__wnullstring = L"(null)"' to * to avoid pclint warning on "redundant statement declared symbol's__nullstring'". * 4. Date: 2014/5/7 * Author: LiShunda * Modification: replace memcpy with memcpy_s at line 558 * 5. Date: 2014/5/20 * Author: LiShunda * Modification: change the return type of function "__putwc_nolock" from unsigned short to * wchar_t. * 6. Date: 2014/6/4 * Author: LiShunda * Modification: change the function name from "fcvtbuf" to "fcvtbuf_hw", and add "static" modifier. * change the function name from "ecvtbuf" to "ecvtbuf_hw", and add "static" modifier. * 7. Date: 2014/6/8 * Author: LiShunda * Modification: move the functions of __putwc_nolock, write_char_w, write_multi_char_w * and write_string_w from this file to secureprintoutput_w.c. In ANSI mode build, * these functions are NOT needed, which can reduce code generation size * and let securecutil.c NOT include . * 8. Date: 2014/7/1 * Author: LiShunda * Modification: move "securec__lookuptable_s[]" from this file to output.inl, which is to avoid * redefinition when multiple secure c library are integrated. ******************************************************************************** */ #include #include "securec.h" #include "securecutil.h" #include "secureprintoutput.h" #include #include #ifdef ANDROID #include int wctomb(char *s, wchar_t wc) { return wcrtomb(s,wc,NULL); } int mbtowc(wchar_t *pwc, const char *s, size_t n) { return mbrtowc(pwc, s, n, NULL); } #endif /*SPC verNumber<->verStr like:0X201<->C01;0X202<->SPC001;0X502<->SPC002;0X503<->SPC003...;0X510<->SPC010;0X511<->SPC011...*/ /*CP verNumber<->verStr like:0X601<->CP0001;0X602<->CP0002...;*/ void getHwSecureCVersion(char* verStr, int bufSize, unsigned short* verNumber) { if (verStr != NULL && bufSize > 0 ) { (void)strcpy_s(verStr, (size_t)bufSize, "Huawei Secure C V100R001C01SPC004"); } if (verNumber != NULL) { *verNumber = (5 << 8 | 4);/*high Num << 8 | num of Ver*/ } } void util_memmove (void* dst, const void* src, size_t count) { UINT8T* pDest = (UINT8T*)dst; UINT8T* pSrc = (UINT8T*)src; if (dst <= src || pDest >= (pSrc + count)) _CHECK_BUFFER_OVERLAP { /* * Non-Overlapping Buffers * copy from lower addresses to higher addresses */ while (count--) { *pDest = *(UINT8T*)pSrc; ++pDest; ++pSrc; } } else { /* * Overlapping Buffers * copy from higher addresses to lower addresses */ pDest = pDest + count - 1; pSrc = pSrc + count - 1; while (count--) { *pDest = *pSrc; --pDest; --pSrc; } } } /*put a char to output stream */ #define SECUREC_PUTC_NOLOCK(_c,_stream) (--(_stream)->_cnt >= 0 ? 
0xff & (*(_stream)->_ptr++ = (char)(_c)) : EOF) int putWcharStrEndingZero(SECUREC_XPRINTF_STREAM* str, int zerosNum) { int succeed = 0, i = 0; for (; i < zerosNum && (SECUREC_PUTC_NOLOCK('\0', str) != EOF ); ++i) { } if (i == zerosNum) { succeed = 1; } return succeed; } int vsnprintf_helper (char* string, size_t count, const char* format, va_list arglist) { SECUREC_XPRINTF_STREAM str; int retval; str._cnt = (int)count; str._ptr = string; retval = securec_output_s(&str, format, arglist ); if ((retval >= 0) && (SECUREC_PUTC_NOLOCK('\0', &str) != EOF)) { return (retval); } if (string != NULL) { string[count - 1] = 0; } if (str._cnt < 0) { /* the buffer was too small; we return -2 to indicate truncation */ return -2; } return -1; } /* remove it */ void write_char_a( char ch, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { if (SECUREC_PUTC_NOLOCK(ch, f) == EOF) { *pnumwritten = -1; } else { ++(*pnumwritten); } } void write_multi_char_a(char ch, int num, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { while (num-- > 0) { if (SECUREC_PUTC_NOLOCK(ch, f) == EOF) { *pnumwritten = -1; break; } else { ++(*pnumwritten); } } } void write_string_a (char* string, int len, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { while (len-- > 0) { if (SECUREC_PUTC_NOLOCK(*string, f) == EOF) { *pnumwritten = -1; break; } else { ++(*pnumwritten); ++string; } } } /* Following function "U64Div32" realized the operation of division between an unsigned 64-bits * number and an unsigned 32-bits number. * these codes are contributed by Dopra team in syslib. */ #if defined(VXWORKS_VERSION) #define SEC_MAX_SHIFT_NUM 32 #define SEC_MASK_BIT_ALL 0xFFFFFFFF #define SEC_MASK_BIT_32 0x80000000 #define SEC_MASK_BIT_01 0x00000001 #define SEC_MASK_HI_NBITS(x) (SEC_MASK_BIT_ALL << (SEC_MAX_SHIFT_NUM - (x))) typedef enum tagbit64SecCompareResult { SEC_BIT64_GREAT, SEC_BIT64_EQUAL, SEC_BIT64_LESS } bit64SecCompareResult; #define SEC_BIT64_SUB(argAHi, argALo, argBHi, argBLo) \ do \ { \ if ((argALo) < (argBLo)) \ { \ (argAHi) -= ((argBHi) + 1); \ } \ else \ { \ (argAHi) -= (argBHi); \ } \ (argALo) -= (argBLo); \ } while (0) #define SEC_BIT64_COMPARE(argAHi, argALo, argBHi, argBLo, result) \ do \ { \ if ((argAHi) > (argBHi)) \ { \ result = SEC_BIT64_GREAT; \ } \ else if (((argAHi) == (argBHi)) && ((argALo) > (argBLo))) \ { \ result = SEC_BIT64_GREAT; \ } \ else if (((argAHi) == (argBHi)) && ((argALo) == (argBLo))) \ { \ result = SEC_BIT64_EQUAL; \ } \ else \ { \ result = SEC_BIT64_LESS; \ } \ } while (0) INT32T U64Div64(UINT32T uiDividendHigh, UINT32T uiDividendLow, UINT32T uiDivisorHigh, UINT32T uiDivisorLow, UINT32T *puiQuotientHigh, UINT32T *puiQuotientLow, UINT32T *puiRemainderHigh, UINT32T *puiRemainderLow) { INT8T scShiftNumHi = 0, scShiftNumLo = 0; UINT32T uiTmpQuoHi, uiTmpQuoLo; UINT32T uiTmpDividendHi, uiTmpDividendLo; UINT32T uiTmpDivisorHi, uiTmpDivisorLo; bit64SecCompareResult etmpResult; if ((NULL == puiQuotientHigh) || (NULL == puiQuotientLow)) { return -1; } if (0 == uiDivisorHigh) { if (0 == uiDivisorLow) { return -1; } else if (1 == uiDivisorLow) { *puiQuotientHigh = uiDividendHigh; *puiQuotientLow = uiDividendLow; if (NULL != puiRemainderHigh && NULL != puiRemainderLow) { *puiRemainderHigh = 0; *puiRemainderLow = 0; } return 0; } } uiTmpQuoHi = uiTmpQuoLo = 0; uiTmpDividendHi = uiDividendHigh; uiTmpDividendLo = uiDividendLow; /* if divisor is larger than dividend, quotient equals to zero, * remainder equals to dividends */ SEC_BIT64_COMPARE(uiDividendHigh, uiDividendLow, uiDivisorHigh, uiDivisorLow, etmpResult); if 
(SEC_BIT64_LESS == etmpResult) { goto returnHandle; } else if (SEC_BIT64_EQUAL == etmpResult) { *puiQuotientHigh = 0; *puiQuotientLow = 1; if ((NULL != puiRemainderHigh) && (NULL != puiRemainderLow)) { *puiRemainderHigh = 0; *puiRemainderLow = 0; } return 0; } /* get shift number to implement divide arithmetic */ if (uiDivisorHigh > 0) { for (scShiftNumHi = 0; scShiftNumHi < SEC_MAX_SHIFT_NUM; scShiftNumHi++) { if ( (uiDivisorHigh << scShiftNumHi) & SEC_MASK_BIT_32 ) { break; } } } else { for (scShiftNumLo = 0; scShiftNumLo < SEC_MAX_SHIFT_NUM; scShiftNumLo++) { if ( (uiDivisorLow << scShiftNumLo) & SEC_MASK_BIT_32 ) { break; } } } if (uiDivisorHigh > 0) { /* divisor's high 32 bits doesn't equal to zero */ for (; scShiftNumHi >= 0; scShiftNumHi--) { if (0 == scShiftNumHi) { uiTmpDivisorHi = uiDivisorHigh; } else { uiTmpDivisorHi = (uiDivisorHigh << scShiftNumHi) | (uiDivisorLow >> (SEC_MAX_SHIFT_NUM - scShiftNumHi)); } uiTmpDivisorLo = uiDivisorLow << scShiftNumHi; SEC_BIT64_COMPARE(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, uiTmpDivisorLo, etmpResult); if (SEC_BIT64_LESS != etmpResult) { SEC_BIT64_SUB(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, uiTmpDivisorLo); uiTmpQuoLo |= (1 << scShiftNumHi); if ((0 == uiTmpDividendHi) && (0 == uiTmpDividendLo)) { goto returnHandle; } } if (0 == scShiftNumHi) break; } } else { /* divisor's high 32 bits equals to zero */ scShiftNumHi = scShiftNumLo; for (; scShiftNumHi >= 0; scShiftNumHi--) { uiTmpDivisorHi = uiDivisorLow << scShiftNumHi; SEC_BIT64_COMPARE(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, 0, etmpResult); if (SEC_BIT64_LESS != etmpResult) { UINT32T uiTmp = 0; SEC_BIT64_SUB(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, uiTmp); uiTmpQuoHi |= (1 << scShiftNumHi); if ((0 == uiTmpDividendHi) && (0 == uiTmpDividendLo)) { goto returnHandle; } } if (0 == scShiftNumHi) break; } for (scShiftNumHi = SEC_MAX_SHIFT_NUM - 1; scShiftNumHi >= 0; scShiftNumHi--) { if (0 == scShiftNumHi) { uiTmpDivisorHi = 0; } else { uiTmpDivisorHi = uiDivisorLow >> (SEC_MAX_SHIFT_NUM - scShiftNumHi); } uiTmpDivisorLo = uiDivisorLow << scShiftNumHi; SEC_BIT64_COMPARE(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, uiTmpDivisorLo, etmpResult); if (SEC_BIT64_LESS != etmpResult) { SEC_BIT64_SUB(uiTmpDividendHi, uiTmpDividendLo, uiTmpDivisorHi, uiTmpDivisorLo); uiTmpQuoLo |= (1 << scShiftNumHi); if ((0 == uiTmpDividendHi) && (0 == uiTmpDividendLo)) { goto returnHandle; } } if (0 == scShiftNumHi) break; } } returnHandle: *puiQuotientHigh = uiTmpQuoHi; *puiQuotientLow = uiTmpQuoLo; if ((NULL != puiRemainderHigh) && (NULL != puiRemainderLow)) { *puiRemainderHigh = uiTmpDividendHi; *puiRemainderLow = uiTmpDividendLo; } return 0; } INT32T U64Div32(UINT32T uiDividendHigh, UINT32T uiDividendLow, UINT32T uiDivisor, UINT32T *puiQuotientHigh, UINT32T *puiQuotientLow, UINT32T *puiRemainder) { UINT32T uiTmpRemainderHi = 0, uiTmpRemainderLo = 0; UINT32T uiRet = 0; if ((NULL == puiQuotientHigh) || (NULL == puiQuotientLow) || 0 == uiDivisor || NULL == puiRemainder) { return -1; } uiDividendHigh &= SEC_MASK_BIT_ALL; uiDividendLow &= SEC_MASK_BIT_ALL; uiDivisor &= SEC_MASK_BIT_ALL; *puiQuotientHigh = 0; *puiQuotientLow = 0; *puiRemainder = 0; uiRet = U64Div64( uiDividendHigh, uiDividendLow, 0, uiDivisor, puiQuotientHigh, puiQuotientLow, &uiTmpRemainderHi, &uiTmpRemainderLo ); if (0 != uiRet) { return uiRet; } if (NULL != puiRemainder) { if(0 != uiTmpRemainderHi) { return -1; } *puiRemainder = uiTmpRemainderLo; } return 0; } #endif 
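/*
 * Usage sketch for the public *_s APIs implemented in this directory. This is
 * an illustrative, standalone example, not one of the original archive files:
 * it assumes securec.h is on the include path and the library is linked, and
 * the buffer size, format string and values below are hypothetical. The
 * signatures and return codes follow the function headers above (EOK on
 * success, -1 from sprintf_s on error).
 */
#include <stdio.h>
#include "securec.h"

int main(void)
{
    char buf[32];

    /* memset_s validates dest and destMax before writing; returns EOK on success */
    if (memset_s(buf, sizeof(buf), 0, sizeof(buf)) != EOK) {
        return 1;
    }

    /* sprintf_s returns the number of characters stored, or -1 if an error occurred */
    if (sprintf_s(buf, sizeof(buf), "uvp-%d", 316) < 0) {
        return 1;
    }

    /* memmove_s checks count against destMax and copies safely even when
       the source and destination regions overlap */
    if (memmove_s(buf + 4, sizeof(buf) - 4, buf, 8) != EOK) {
        return 1;
    }

    printf("%s\n", buf);
    return 0;
}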
UVP-Tools-2.2.0.316/uvp-monitor/securec/src/securecutil.h000066400000000000000000000121651314037446600230270ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: securecutil.h * History: * 1. Date: 2014/5/20 * Author: LiShunda * Modification: remove extern "C" modifier which will cause g++ link error. ******************************************************************************** */ #ifndef __SECURECUTIL_H__46C86578_F8FF_4E49_8E64_9B175241761F #define __SECURECUTIL_H__46C86578_F8FF_4E49_8E64_9B175241761F #include #ifdef CALL_LIBC_COR_API /*#include if memory.h don't exist, use "string.h" instead.*/ #include #endif #define DIRECT_ASSIGNMENT_THRESHOLD (12UL) /*struct for performance*/ typedef struct {char buf[1];}MY_STR1; typedef struct {char buf[2];}MY_STR2; typedef struct {char buf[3];}MY_STR3; typedef struct {char buf[4];}MY_STR4; typedef struct {char buf[5];}MY_STR5; typedef struct {char buf[6];}MY_STR6; typedef struct {char buf[7];}MY_STR7; typedef struct {char buf[8];}MY_STR8; typedef struct {char buf[9];}MY_STR9; typedef struct {char buf[10];}MY_STR10; typedef struct {char buf[11];}MY_STR11; typedef struct {char buf[12];}MY_STR12; typedef struct {char buf[13];}MY_STR13; typedef struct {char buf[14];}MY_STR14; typedef struct {char buf[15];}MY_STR15; typedef struct {char buf[16];}MY_STR16; typedef struct {char buf[17];}MY_STR17; typedef struct {char buf[18];}MY_STR18; typedef struct {char buf[19];}MY_STR19; typedef struct {char buf[20];}MY_STR20; typedef struct {char buf[21];}MY_STR21; typedef struct {char buf[22];}MY_STR22; typedef struct {char buf[23];}MY_STR23; typedef struct {char buf[24];}MY_STR24; typedef struct {char buf[25];}MY_STR25; typedef struct {char buf[26];}MY_STR26; typedef struct {char buf[27];}MY_STR27; typedef struct {char buf[28];}MY_STR28; typedef struct {char buf[29];}MY_STR29; typedef struct {char buf[30];}MY_STR30; typedef struct {char buf[31];}MY_STR31; typedef struct {char buf[32];}MY_STR32; typedef struct {char buf[33];}MY_STR33; typedef struct {char buf[34];}MY_STR34; typedef struct {char buf[35];}MY_STR35; typedef struct {char buf[36];}MY_STR36; typedef struct {char buf[37];}MY_STR37; typedef struct {char buf[38];}MY_STR38; typedef struct {char buf[39];}MY_STR39; typedef struct {char buf[40];}MY_STR40; typedef struct {char buf[41];}MY_STR41; typedef struct {char buf[42];}MY_STR42; typedef struct {char buf[43];}MY_STR43; typedef struct {char buf[44];}MY_STR44; typedef struct {char buf[45];}MY_STR45; typedef struct {char buf[46];}MY_STR46; typedef struct {char buf[47];}MY_STR47; typedef struct {char buf[48];}MY_STR48; typedef struct {char buf[49];}MY_STR49; typedef struct {char buf[50];}MY_STR50; typedef struct {char buf[51];}MY_STR51; typedef struct {char buf[52];}MY_STR52; typedef struct {char buf[53];}MY_STR53; typedef struct {char buf[54];}MY_STR54; typedef struct {char buf[55];}MY_STR55; typedef struct {char buf[56];}MY_STR56; typedef struct {char buf[57];}MY_STR57; typedef struct {char buf[58];}MY_STR58; typedef struct {char buf[59];}MY_STR59; typedef struct {char buf[60];}MY_STR60; typedef struct {char buf[61];}MY_STR61; typedef struct {char buf[62];}MY_STR62; typedef struct {char buf[63];}MY_STR63; typedef struct {char buf[64];}MY_STR64; /*#define USE_ASM*/ #define _CHECK_BUFFER_OVERLAP /*lint !e946*/ #define ERROR_HANDLER_BY_PRINTF /* #define ERROR_HANDLER_BY_ASSERT #define ERROR_HANDLER_BY_FILE_LOG */ /* 
User can change the error handler by modify the following definition, * such as logging the detail error in file. */ #if defined(_DEBUG) || defined(DEBUG) #if defined(ERROR_HANDLER_BY_ASSERT) #define SECUREC_ERROR_INVALID_PARAMTER(msg) assert( msg "invalid argument" == NULL) #define SECUREC_ERROR_INVALID_RANGE(msg) assert( msg "invalid dest buffer size" == NULL) #define SECUREC_ERROR_BUFFER_OVERLAP(msg) assert( msg "buffer overlap" == NULL) #elif defined(ERROR_HANDLER_BY_PRINTF) #define SECUREC_ERROR_INVALID_PARAMTER(msg) printf( "%s invalid argument\n",msg) #define SECUREC_ERROR_INVALID_RANGE(msg) printf( "%s invalid dest buffer size\n", msg) #define SECUREC_ERROR_BUFFER_OVERLAP(msg) printf( "%s buffer overlap\n",msg) #else #define SECUREC_ERROR_INVALID_PARAMTER(msg) ((void)0) #define SECUREC_ERROR_INVALID_RANGE(msg) ((void)0) #define SECUREC_ERROR_BUFFER_OVERLAP(msg) ((void)0) #endif #if defined(ERROR_HANDLER_BY_FILE_LOG) #define SECUREC_ERROR_INVALID_PARAMTER(msg) logSecureCRuntimeError(msg " EINVAL\n") #define SECUREC_ERROR_INVALID_RANGE(msg) logSecureCRuntimeError(msg " ERANGE\n") #define SECUREC_ERROR_BUFFER_OVERLAP(msg) logSecureCRuntimeError(msg " EOVERLAP\n") #endif #else #define SECUREC_ERROR_INVALID_PARAMTER(msg) ((void)0) #define SECUREC_ERROR_INVALID_RANGE(msg) ((void)0) #define SECUREC_ERROR_BUFFER_OVERLAP(msg) ((void)0) #endif void util_memmove (void* dst, const void* src, size_t count); //lint -esym(526, vsnprintf_helper*) int vsnprintf_helper (char* string, size_t count, const char* format, va_list ap); #ifdef __cplusplus extern "C" { #endif void logSecureCRuntimeError(const char* errDetail); #ifdef __cplusplus } #endif /* __cplusplus */ #endif UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secureinput_a.c000066400000000000000000000013671314037446600233430ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secureinput_a.c * Decription: * by defining data type for ANSI string and including "input.inl", * this file generates real underlying function used by scanf family * API. * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #ifdef _UNICODE #undef _UNICODE #endif #ifdef UNICODE #undef UNICODE #endif typedef char _TCHAR; typedef unsigned char _TUCHAR; typedef int _TINT; #include "input.inl" UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secureinput_w.c000066400000000000000000000020211314037446600233550ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secureinput_w.c * Decription: * by defining data type for UNICODE string and including "input.inl", * this file generates real underlying function used by scanf family * API. * History: * 1. 
Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include #if !(defined(SECUREC_VXWORKS_PLATFORM)) #include /*lint !e322 !e7*/ #include #endif #ifndef UNICODE #define UNICODE #endif #ifndef _UNICODE #define _UNICODE #endif #ifndef WEOF #define WEOF ((wchar_t)-1) #endif typedef wchar_t _TCHAR; typedef wchar_t _TUCHAR; #if defined(SECUREC_VXWORKS_PLATFORM) && !defined(__WINT_TYPE__) typedef wchar_t wint_t; #endif typedef wint_t _TINT; #include "input.inl" UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secureprintoutput.h000066400000000000000000000076631314037446600243330ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secureprintoutput.h * Decription: * define macro, enum, data struct, and declare internal used function * prototype, which is used by output.inl, secureprintoutput_w.c and * secureprintoutput_a.c. * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #ifndef __SECUREPRINTOUTPUT_H__E950DA2C_902F_4B15_BECD_948E99090D9C #define __SECUREPRINTOUTPUT_H__E950DA2C_902F_4B15_BECD_948E99090D9C /* flag definitions */ enum{ FLAG_SIGN = 0x00001, FLAG_SIGN_SPACE = 0x00002, FLAG_LEFT = 0x00004, FLAG_LEADZERO = 0x00008, FLAG_LONG = 0x00010, FLAG_SHORT = 0x00020, FLAG_SIGNED = 0x00040, FLAG_ALTERNATE = 0x00080, FLAG_NEGATIVE = 0x00100, FLAG_FORCE_OCTAL = 0x00200, FLAG_LONG_DOUBLE = 0x00400, FLAG_WIDECHAR = 0x00800, FLAG_LONGLONG = 0x01000, FLAG_CHAR = 0x02000, FLAG_POINTER = 0x04000, FLAG_I64 = 0x08000, FLAG_PTRDIFF = 0x10000, #ifdef COMPATIBLE_LINUX_FORMAT FLAG_SIZE = 0x20000, FLAG_INTMAX = 0x40000 #else FLAG_SIZE = 0x20000 #endif }; /* character type values */ typedef enum _CHARTYPE { CHAR_OTHER, /* character with no special meaning */ CHAR_PERCENT, /* '%' */ CHAR_DOT, /* '.' */ CHAR_STAR, /* '*' */ CHAR_ZERO, /* '0' */ CHAR_DIGIT, /* '1'..'9' */ CHAR_FLAG, /* ' ', '+', '-', '#' */ CHAR_SIZE, /* 'h', 'l', 'L', 'N', 'F', 'w' */ CHAR_TYPE /* type specifying character */ }CHARTYPE; /* state definitions */ typedef enum _FMT_STATE { STAT_NORMAL, STAT_PERCENT, STAT_FLAG, STAT_WIDTH, STAT_DOT, STAT_PRECIS, STAT_SIZE, STAT_TYPE, STAT_INVALID }FMT_STATE; #define NUMSTATES ((int)STAT_INVALID + 1) #ifdef STACK_SIZE_LESS_THAN_1K #define BUFFERSIZE 256 #else #define BUFFERSIZE 512 #endif #define MAXPRECISION BUFFERSIZE #define BUFFERSIZE_LB 5120 #define MAXPRECISION_LB BUFFERSIZE_LB #ifndef MB_LEN_MAX #define MB_LEN_MAX 5 #endif #define CVTBUFSIZE (309+40) /* # of digits in max. dp value + slop */ #define CVTBUFSIZE_LB (4932+40) #define FIND_CHAR_CLASS(lookuptbl, c) ((c) < _T(' ') || (c) > _T('z') ? 
CHAR_OTHER : (CHARTYPE)(lookuptbl[(c)-_T(' ')] & 0xF)) #define FIND_NEXT_STATE(lookuptbl, charClass, state) (FMT_STATE)((int)(lookuptbl[(int)(charClass) * NUMSTATES + (int)(state)]) >> 4) typedef struct _SECUREC_XPRINTF_STREAM { int _cnt; char* _ptr; } SECUREC_XPRINTF_STREAM; INT32T U64Div32(UINT32T uiDividendHigh,UINT32T uiDividendLow,UINT32T uiDivisor,UINT32T *puiQuotientHigh,UINT32T *puiQuotientLow,UINT32T *puiRemainder);/*lint !e129 !e101 !e132 !e131*/ //lint -esym(526, write_char_a*) void write_char_a ( char ch, SECUREC_XPRINTF_STREAM* f, int* pnumwritten ); //lint -esym(526, write_multi_char_a*) void write_multi_char_a ( char ch, int num, SECUREC_XPRINTF_STREAM* f, int* pnumwritten ); //lint -esym(526, write_string_a*) void write_string_a ( char* string, int len, SECUREC_XPRINTF_STREAM* f, int* pnumwritten ); //lint -esym(526, securec_output_s*) int securec_output_s ( SECUREC_XPRINTF_STREAM* stream, const char* format, va_list argptr ); //lint -esym(526, securec_woutput_s*) int securec_woutput_s ( SECUREC_XPRINTF_STREAM* stream, const wchar_t* format, va_list argptr ); #endif UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secureprintoutput_a.c000066400000000000000000000025241314037446600246150ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secureprintoutput_a.c * Decription: * by defining corresponding macro for ANSI string and including * "output.inl", this file generates real underlying function used by * printf family API. * History: * 1. Date: 2014/6/13 * Author: LiShunda * Modification: change "#define TCHAR char" to "typedef char TCHAR" * 2. Date: 2014/7/1 * Author: LiShunda * Modification: remove "extern const UINT8T securec__lookuptable_s[]". * securec__lookuptable_s is a global variable which will cause * redefinition when multiple secure c library are integrated. ******************************************************************************** */ #include "securec.h" #include #include #include "secureprintoutput.h" #include /***************************/ /*remove this def style #define TCHAR char*/ typedef char TCHAR; #define _T(x) x #define write_char write_char_a #define write_multi_char write_multi_char_a #define write_string write_string_a /***************************/ /*extern const UINT8T securec__lookuptable_s[];*/ #include "output.inl" UVP-Tools-2.2.0.316/uvp-monitor/securec/src/secureprintoutput_w.c000066400000000000000000000062761314037446600246530ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: secureprintoutput_w.c * Decription: * by defining corresponding macro for UNICODE string and including * "output.inl", this file generates real underlying function used by * printf family API. * History: * 1. Date: 2014/6/8 * Author: LiShunda * Modification: move the functions of __putwc_nolock, write_char_w, write_multi_char_w * and write_string_w from securecutil.c to this file. In ANSI mode build, * these functions are NOT needed, which can reduce code generation size * and let securecutil.c NOT include . * 2. Date: 2014/7/1 * Author: LiShunda * Modification: remove "extern const UINT8T securec__lookuptable_s[]". * securec__lookuptable_s is a global variable which will cause * redefinition when multiple secure c library are integrated. 
******************************************************************************** */ #include "securec.h" #include #include #include #ifndef WEOF /*if some platforms don't have wchar.h, dont't include it*/ #if !(defined(SECUREC_VXWORKS_PLATFORM)) #include /*lint !e322 !e7*/ #endif #ifndef WEOF #define WEOF ((wchar_t)-1) #endif #endif /* WEOF */ #include "secureprintoutput.h" /***************************/ #define _XXXUNICODE #define TCHAR wchar_t #define _T(x) L ## x #define write_char write_char_w #define write_multi_char write_multi_char_w #define write_string write_string_w /***************************/ /*extern const UINT8T securec__lookuptable_s[];*/ /*put a wchar to output stream */ /*LSD change "unsigned short" to wchar_t*/ static wchar_t __putwc_nolock (wchar_t _c, SECUREC_XPRINTF_STREAM* _stream) { wchar_t wcRet = 0; if (((_stream)->_cnt -= (int)WCHAR_SIZE ) >= 0 ) { *(wchar_t*)(_stream->_ptr) = (wchar_t)_c; /*lint !e826*/ _stream->_ptr += sizeof (wchar_t); /* remove LSD wcRet = (unsigned short) (0xffff & (wchar_t)_c); */ wcRet = _c; } else { wcRet = (wchar_t)WEOF; } return wcRet; } static void write_char_w(wchar_t ch, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { if (__putwc_nolock(ch, f) == (wchar_t)WEOF) { *pnumwritten = -1; } else { ++(*pnumwritten); } } static void write_multi_char_w(wchar_t ch, int num, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { while (num-- > 0) { write_char_w(ch, f, pnumwritten); if (*pnumwritten == -1) { break; } } } static void write_string_w (wchar_t* string, int len, SECUREC_XPRINTF_STREAM* f, int* pnumwritten) { while (len-- > 0) { write_char_w(*string++, f, pnumwritten); if (*pnumwritten == -1) { /* if (errno == EILSEQ) write_char(_T('?'), f, pnumwritten); else break; */ break; } } } #include "output.inl" UVP-Tools-2.2.0.316/uvp-monitor/securec/src/snprintf_s.c000066400000000000000000000043231314037446600226550ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: sprintf.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * snprintf_s * * * int snprintf_s(char* strDest, size_t destMax, size_t count, const char* format, ...); * * * The snprintf_s function formats and stores count or fewer characters in * strDest and appends a terminating null. Each argument (if any) is converted * and output according to the corresponding format specification in format. * The formatting is consistent with the printf family of functions; If copying * occurs between strings that overlap, the behavior is undefined. * * * strDest Storage location for the output. * destMax The size of the storage location for output. Size * in bytes for snprintf_s or size in words for snwprintf_s. * count Maximum number of character to store. * format Format-control string. * * * strDest is updated * * * snprintf_s returns the number of characters stored in strDest, not counting * the terminating null character. * If the storage required to store the data and a terminating null exceeds * destMax, the function set strDest to an empty string, and return -1. * If strDest or format is a NULL pointer, or if count is less than or equal * to zero, the function return -1. 
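*
* Illustrative usage sketch (added note, not part of the original source;
* the buffer name "msg" and the formatted values are hypothetical):
*
*     char msg[32];
*     int n = snprintf_s(msg, sizeof(msg), sizeof(msg) - 1, "%s=%d", "count", 42);
*
* Here n is the number of characters stored on success, or -1 if the
* arguments are invalid or the formatted text does not fit in msg.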
******************************************************************************* */ int snprintf_s (char* strDest, size_t destMax, size_t count, const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vsnprintf_s(strDest, destMax, count, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/sprintf_s.c000066400000000000000000000037121314037446600225000ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: snprintf.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * sprintf_s * * * int sprintf_s(char* strDest, size_t destMax, const char* format, ...); * * * The sprintf_s function formats and stores a series of characters and values * in strDest. Each argument (if any) is converted and output according to * the corresponding format specification in format. The format consists of * ordinary characters and has the same form and function as the format argument * for printf. A null character is appended after the last character written. * If copying occurs between strings that overlap, the behavior is undefined. * * * strDest Storage location for output. * destMax Maximum number of characters to store. * format Format-control string. * * * strDest is updated * * * sprintf_s returns the number of bytes stored in strDest, not counting the * terminating null character. * The number of characters written, or -1 if an error occurred. If strDest * or format is a null pointer, sprintf_s returns -1. ******************************************************************************* */ int sprintf_s (char* strDest, size_t destMax, const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vsprintf_s(strDest, destMax, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/sscanf_s.c000066400000000000000000000040251314037446600222660ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: sscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * sscanf_s * * * int sscanf_s (const char *buffer, const char *format, ...) * * * The sscanf function reads data from buffer into the location given by each * argument. Every argument must be a pointer to a variable with a type that * corresponds to a type specifier in format. The format argument controls the * interpretation of the input fields and has the same form and function as * the format argument for the scanf function. * If copying takes place between strings that overlap, the behavior is undefined. * * * buffer Stored data. * format Format control string, see Format Specifications. * ... Optional arguments. * * * ... 
The converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. * A return value of 0 indicates that no fields were assigned. * The return value is SCANF_EINVAL(-1) for an error or if the end of the string * is reached before the first conversion. ******************************************************************************* */ int sscanf_s (const char* buffer, const char* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vsscanf_s( buffer, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/strcat_s.c000066400000000000000000000103231314037446600223070ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: strcat_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * strcat_s * * errno_t strcat_s(char* strDest, size_t destMax, const char* strSrc); * * * The strcat_s function appends strSrc to strDest and terminates the resulting * string with a null character. The initial character of strSrc overwrites * the terminating null character of strDest. strcat_s will return EOVERLAP_AND_RESET * if the source and destination strings overlap. * * Note that the second parameter is the total size of the buffer, not the * remaining size. * * * strDest Null-terminated destination string buffer. * destMax Size of the destination string buffer. * strSrc Null-terminated source string buffer. * * * strDest is updated * * * EOK Successful operation * EINVAL strDest not terminated or NULL, strSrc is NULL * ERANGE destMax is 0, too small, or destMax > SECUREC_STRING_MAX_LEN * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped ******************************************************************************* */ errno_t strcat_s(char* strDest, size_t destMax, const char* strSrc) { char* pHeader = strDest; size_t availableSize = destMax; IN_REGISTER const char* overlapGuard = NULL; if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) { SECUREC_ERROR_INVALID_RANGE("strcat_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); if (strDest != NULL) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } if (strDest < strSrc) { overlapGuard = strSrc; while (availableSize > 0 && *strDest != 0) { if (strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcat_s"); return EOVERLAP_AND_RESET; } /*seek to string end*/ strDest++; availableSize--; } /* strDest unterminated, return error. */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); return EINVAL_AND_RESET; } /* if available > 0, then excute the strcat operation */ while ((*strDest++ = *strSrc++) != 0 && --availableSize > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcat_s"); return EOVERLAP_AND_RESET; } } } else { overlapGuard = strDest; while (availableSize > 0 && *strDest != '\0') { /*seek to string end, and no need to check overlap*/ strDest++; availableSize--; } /* strDest unterminated, return error. 
*/ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("strcat_s"); return EINVAL_AND_RESET; } while ((*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcat_s"); return EOVERLAP_AND_RESET; } } } /* strDest have not enough space, return error */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("strcat_s"); return ERANGE_AND_RESET; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/strcpy_s.c000066400000000000000000000162061314037446600223410ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: strcpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * strcpy_s * * * errno_t strcpy_s(char* strDest, size_t destMax, const char* strSrc) * * * The strcpy_s function copies the contents in the address of strSrc, * including the terminating null character, to the location specified by strDest. * The destination string must be large enough to hold the source string, * including the terminating null character. strcpy_s will return EOVERLAP_AND_RESET * if the source and destination strings overlap. * * * strDest Location of destination string buffer * destMax Size of the destination string buffer. * strSrc Null-terminated source string buffer. * * * strDest is updated. * * * 0 success * EINVAL strDest == NULL or strSrc == NULL * ERANGE destination buffer is NOT enough, or size of * buffer is zero or greater than SECUREC_STRING_MAX_LEN * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped * * If there is a runtime-constraint violation, then if strDest is not a null * pointer and destMax is greater than zero and not greater than * SECUREC_STRING_MAX_LEN, then strcpy_s sets strDest[0] to the null character. 
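*
* Illustrative usage sketch (added note, not part of the original source;
* the destination buffer "dest" and the literal are hypothetical):
*
*     char dest[16];
*     errno_t rc = strcpy_s(dest, sizeof(dest), "hello");
*
* rc is EOK on success; on a range or overlap violation the function
* returns a non-zero error code and, where possible, resets dest[0] to '\0'.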
******************************************************************************* */ #ifndef SECURE_STRCOPY_SIZE #define SECURE_STRCOPY_SIZE (32UL) #endif #define SC_ADDR_ALIGNED(addr) !((size_t)addr & 7) #define SECURE_STRCPY_BY_STRUCT(num) case num: *(MY_STR##num *)strDest=*(MY_STR##num *) strSrc; break; #define SECURE_STRCPY_BY_BYTE(caseNum) case caseNum: *pcDest++ = *pcSrc++; #define SMALL_STR_COPY \ if (SC_ADDR_ALIGNED(strDest) && SC_ADDR_ALIGNED(strSrc) ) \ { \ /*use struct assignment*/ \ switch(srcStrLen)\ { \ SECURE_STRCPY_BY_STRUCT(1)\ SECURE_STRCPY_BY_STRUCT(2)\ SECURE_STRCPY_BY_STRUCT(3)\ SECURE_STRCPY_BY_STRUCT(4)\ SECURE_STRCPY_BY_STRUCT(5)\ SECURE_STRCPY_BY_STRUCT(6)\ SECURE_STRCPY_BY_STRUCT(7)\ SECURE_STRCPY_BY_STRUCT(8)\ SECURE_STRCPY_BY_STRUCT(9)\ SECURE_STRCPY_BY_STRUCT(10)\ SECURE_STRCPY_BY_STRUCT(11)\ SECURE_STRCPY_BY_STRUCT(12)\ SECURE_STRCPY_BY_STRUCT(13)\ SECURE_STRCPY_BY_STRUCT(14)\ SECURE_STRCPY_BY_STRUCT(15)\ SECURE_STRCPY_BY_STRUCT(16)\ SECURE_STRCPY_BY_STRUCT(17)\ SECURE_STRCPY_BY_STRUCT(18)\ SECURE_STRCPY_BY_STRUCT(19)\ SECURE_STRCPY_BY_STRUCT(20)\ SECURE_STRCPY_BY_STRUCT(21)\ SECURE_STRCPY_BY_STRUCT(22)\ SECURE_STRCPY_BY_STRUCT(23)\ SECURE_STRCPY_BY_STRUCT(24)\ SECURE_STRCPY_BY_STRUCT(25)\ SECURE_STRCPY_BY_STRUCT(26)\ SECURE_STRCPY_BY_STRUCT(27)\ SECURE_STRCPY_BY_STRUCT(28)\ SECURE_STRCPY_BY_STRUCT(29)\ SECURE_STRCPY_BY_STRUCT(30)\ SECURE_STRCPY_BY_STRUCT(31)\ SECURE_STRCPY_BY_STRUCT(32)\ }/*END switch*/ \ } \ else \ { \ char* pcDest = (char*)strDest; \ char* pcSrc = (char*)strSrc; \ switch(srcStrLen) \ { /*lint -save -e616*/\ SECURE_STRCPY_BY_BYTE(32)\ SECURE_STRCPY_BY_BYTE(31)\ SECURE_STRCPY_BY_BYTE(30)\ SECURE_STRCPY_BY_BYTE(29)\ SECURE_STRCPY_BY_BYTE(28)\ SECURE_STRCPY_BY_BYTE(27)\ SECURE_STRCPY_BY_BYTE(26)\ SECURE_STRCPY_BY_BYTE(25)\ SECURE_STRCPY_BY_BYTE(24)\ SECURE_STRCPY_BY_BYTE(23)\ SECURE_STRCPY_BY_BYTE(22)\ SECURE_STRCPY_BY_BYTE(21)\ SECURE_STRCPY_BY_BYTE(20)\ SECURE_STRCPY_BY_BYTE(19)\ SECURE_STRCPY_BY_BYTE(18)\ SECURE_STRCPY_BY_BYTE(17)\ SECURE_STRCPY_BY_BYTE(16)\ SECURE_STRCPY_BY_BYTE(15)\ SECURE_STRCPY_BY_BYTE(14)\ SECURE_STRCPY_BY_BYTE(13)\ SECURE_STRCPY_BY_BYTE(12)\ SECURE_STRCPY_BY_BYTE(11)\ SECURE_STRCPY_BY_BYTE(10)\ SECURE_STRCPY_BY_BYTE(9)\ SECURE_STRCPY_BY_BYTE(8)\ SECURE_STRCPY_BY_BYTE(7)\ SECURE_STRCPY_BY_BYTE(6)\ SECURE_STRCPY_BY_BYTE(5)\ SECURE_STRCPY_BY_BYTE(4)\ SECURE_STRCPY_BY_BYTE(3)\ SECURE_STRCPY_BY_BYTE(2)\ SECURE_STRCPY_BY_BYTE(1)\ } /*lint -restore */\ } \ return EOK; errno_t strcpy_error(char* strDest, size_t destMax, const char* strSrc) { if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) { SECUREC_ERROR_INVALID_RANGE("strcpy_s"); return ERANGE; }else if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("strcpy_s"); if (strDest != NULL) { strDest[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; }else if (strlen(strSrc) + 1 > destMax) { strDest[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("strcpy_s"); return ERANGE_AND_RESET; }else { return EOK; } } errno_t strcpy_s(char* strDest, size_t destMax, const char* strSrc) { if ((destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN && strDest != NULL && strSrc != NULL && strDest != strSrc)) { const char *endPos = strSrc; size_t srcStrLen = destMax; /* use it to store the maxi length limit */ while( *(endPos++) && srcStrLen-- > 0); /* use srcStrLen as boundary checker */ srcStrLen = endPos - strSrc ; /*with ending terminator*/ if (srcStrLen <= destMax) { if (strDest < strSrc) { if (strDest + srcStrLen <= strSrc ) { if ( srcStrLen > SECURE_STRCOPY_SIZE) { 
(void)memcpy(strDest, strSrc, srcStrLen); }else { SMALL_STR_COPY } return EOK; }else{ strDest[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcpy_s"); return EOVERLAP_AND_RESET; } }else{ if (strSrc + srcStrLen <= strDest ) { if ( srcStrLen > SECURE_STRCOPY_SIZE) { (void)memcpy(strDest, strSrc, srcStrLen); }else{ SMALL_STR_COPY } return EOK; }else{ strDest[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcpy_s"); return EOVERLAP_AND_RESET; } } } } return strcpy_error(strDest, destMax, strSrc); } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/strncat_s.c000066400000000000000000000114371314037446600224740ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: strncat_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * strncat_s * * * errno_t * strncat_s(char* strDest, size_t destMax, const char* strSrc, size_t count); * * * The strncat_s function try to append the first D characters of strSrc to * the end of strDest, where D is the lesser of count and the length of strSrc. * If appending those D characters will fit within strDest (whose size is given * as destMax) and still leave room for a null terminator, then those characters * are appended, starting at the original terminating null of strDest, and a * new terminating null is appended; otherwise, strDest[0] is set to the null * character. * * * strDest Null-terminated destination string. * destMax Size of the destination buffer. * strSrc Null-terminated source string. * count Number of character to append, or truncate. * * * strDest is updated * * * EOK Successful operation * EINVAL strDest not terminated or NULL, or strSrc is NULL * ERANGE destMax is 0, too big or too small, count > * SECUREC_STRING_MAX_LEN * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped ******************************************************************************** */ errno_t strncat_s(char* strDest, size_t destMax, const char* strSrc, size_t count) { char* pHeader = strDest; size_t availableSize = destMax; IN_REGISTER const char* overlapGuard = NULL; if (destMax == 0 || destMax > SECUREC_STRING_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("strncat_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("strncat_s"); if (strDest != NULL) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } #ifdef COMPATIBLE_WIN_FORMAT if (count > SECUREC_STRING_MAX_LEN && count !=(size_t)-1) #else if (count > SECUREC_STRING_MAX_LEN) #endif { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("strncat_s"); return ERANGE_AND_RESET; } if (strDest < strSrc) { overlapGuard = strSrc; while (availableSize > 0 && *strDest != '\0') { if (strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strncat_s"); return EOVERLAP_AND_RESET; } strDest++; availableSize--; } /* strDestination unterminated, return error. 
*/ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("strncat_s"); return EINVAL_AND_RESET; } while (count > 0 && (*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strncat_s"); return EOVERLAP_AND_RESET; } count--; } } else { overlapGuard = strDest; while (availableSize > 0 && *strDest != '\0') { /*seek to string end, and no need to check overlap*/ strDest++; availableSize--; } /* strDest unterminated, return error. */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("strncat_s"); return EINVAL_AND_RESET; } while (count > 0 && (*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strncat_s"); return EOVERLAP_AND_RESET; } count--; } } if (count == 0) { *strDest = 0; /* add terminator to strDest*/ } /* strDest have not enough space,return error */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("strncat_s"); return ERANGE_AND_RESET; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/strncpy_s.c000066400000000000000000000115211314037446600225120ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: strncpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * strncpy_s * * * errno_t strncpy_s( char* strDest, size_t destMax, const char* strSrc, size_t count); * * * Copy the contents from strSrc, including the terminating null character, * to the location specified by strDest. * * * strDest Destination string. * destMax The size of the destination string, in characters. * strSrc Source string. * count Number of characters to be copied. * * * strDest is updated * * * EOK(0) success * EINVAL strDest == NULL or strSrc == NULL * ERANGE destMax is zero or greater than SECUREC_STRING_MAX_LEN, * or count > SECUREC_STRING_MAX_LEN, or destMax is too small * EOVERLAP_AND_RESET buffer and source buffer are overlapped * * If there is a runtime-constraint violation, then if strDest is not a null * pointer and destMax is greater than zero and not greater than SECUREC_STRING_MAX_LEN, * then strncpy_s sets strDest[0] to the null character. 
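*
* Illustrative usage sketch (added note, not part of the original source;
* the buffer "dest" and the copy length are hypothetical):
*
*     char dest[8];
*     errno_t rc = strncpy_s(dest, sizeof(dest), "abcdef", 3);
*
* On success rc is EOK and dest holds "abc" with a terminating null;
* destMax must leave room for the copied characters plus the terminator.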
******************************************************************************* */ errno_t strncpy_error(char* strDest, size_t destMax, const char* strSrc, size_t count) { if ( destMax == 0 || destMax > SECUREC_STRING_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("strncpy_s"); return ERANGE; } else if (strDest == NULL || strSrc == NULL ) { SECUREC_ERROR_INVALID_PARAMTER("strncpy_s"); if (strDest != NULL ) { strDest[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } else if ( count > SECUREC_STRING_MAX_LEN) { strDest[0] = '\0'; /*clear dest string*/ SECUREC_ERROR_INVALID_RANGE("strncpy_s"); return ERANGE_AND_RESET; } else if (count == 0) { strDest[0] = '\0'; return EOK; } else { size_t srcStrLen = strlen(strSrc); if( (size_t)(TWO_MIN(srcStrLen, count) + 1) > destMax ) { strDest[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("strncpy_s"); return ERANGE_AND_RESET; } else { return EOK; } } } errno_t strncpy_s(char* strDest, size_t destMax, const char* strSrc, size_t count) { #ifdef COMPATIBLE_WIN_FORMAT if ((destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN && strDest != NULL && strSrc != NULL && (count <= SECUREC_STRING_MAX_LEN || count == (size_t)-1) && count > 0)) { #else if ((destMax > 0 && destMax <= SECUREC_STRING_MAX_LEN && strDest != NULL && strSrc != NULL && count <= SECUREC_STRING_MAX_LEN && count > 0)) { #endif const char *endPos = strSrc; int fillEndingZero = 0; /* flag for setting ending terminator for dest string */ size_t destMaxBk = destMax, minCpLen = count; /* use it to store the maxi length limit */ while( *(endPos++) && --destMaxBk >0 && --minCpLen > 0 ); /* use srcStrLen and minCpLen as boundary checker */ if (0 == destMaxBk) { strDest[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strncpy_s"); return ERANGE_AND_RESET; } if (0 == minCpLen) { minCpLen = count + 1; /* add ending terminator */ fillEndingZero = 1; } else { minCpLen = endPos - strSrc; /* with ending terminator */ } if ((strDest < strSrc && strDest + minCpLen <= strSrc) || (strSrc < strDest && strSrc + minCpLen <= strDest ) || strDest == strSrc) { /*Not overlap*/ if (fillEndingZero) { (void)memcpy(strDest, strSrc, minCpLen - 1); /* copy string by count bytes */ strDest[minCpLen -1] = '\0'; } else { (void)memcpy(strDest, strSrc, minCpLen); /* copy string with terminator */ } return EOK; } else { strDest[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strncpy_s"); return EOVERLAP_AND_RESET; } } return strncpy_error(strDest, destMax, strSrc, count); } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/strtok_s.c000066400000000000000000000061661314037446600223470ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: strtok_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" /******************************************************************************* * * strtok_s * * * char* strtok_s(char* strToken, const char* strDelimit, char** context); * * * The strtok_s function finds the next token in strToken. * * * strToken String containing token or tokens. * strDelimit Set of delimiter characters. * context Used to store position information between calls * to strtok_s * * * * * Returns a pointer to the next token found in strToken. * They return NULL when no more tokens are found. * Each call modifies strToken by substituting a NULL character for the first * delimiter that occurs after the returned token. 
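*
* Illustrative usage sketch (added note, not part of the original source;
* the token buffer and delimiter set are hypothetical):
*
*     char buf[] = "one,two;three";
*     char* ctx = NULL;
*     char* tok = strtok_s(buf, ",;", &ctx);
*     while (tok != NULL) {
*         tok = strtok_s(NULL, ",;", &ctx);
*     }
*
* Successive calls yield "one", "two" and "three"; NULL is returned once
* no further tokens remain, and ctx preserves the scan position between calls.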
* * return value condition * NULL context == NULL, strDelimit == NULL, strToken == NULL * && (*context) == NULL, or no token is found. ******************************************************************************* */ char* strtok_s(char* strToken, const char* strDelimit, char** context) { char* token = NULL; const char* ctl = NULL; /* validate delimiter and string context */ if (context == NULL || strDelimit == NULL) { return NULL; } /*valid input string and string pointer from where to search*/ if (strToken == NULL && (*context) == NULL) { return NULL; } /* If string is null, continue searching from previous string position stored in context*/ if (NULL == strToken) { strToken = *context; } /* Find beginning of token (skip over leading delimiters). Note that * there is no token if this loop sets string to point to the terminal null. */ while (*strToken != 0 ) { ctl = strDelimit; while ( *ctl != 0 && *ctl != *strToken) { ++ctl; } if (*ctl == 0) /*don't find any delimiter in string header, break the loop*/ { break; } ++strToken; } token = strToken; /*point to updated position*/ /* Find the rest of the token. If it is not the end of the string, * put a null there. */ for ( ; *strToken != 0 ; strToken++) { for (ctl = strDelimit; *ctl != 0 && *ctl != *strToken; ctl++) ; if (*ctl != 0) /*find a delimiter*/ { *strToken++ = 0; /*set string termintor*/ break; } } /* record string position for next search in the context */ *context = strToken; /* Determine if a token has been found. */ if (token == strToken) { return NULL; } else { return token; } } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/swprintf_s.c000066400000000000000000000033431314037446600226670ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: swprintf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * swprintf_s * * * int swprintf_s(wchar_t* strDest, size_t destMax, const wchar_t* format, ...); * * * swprintf_s is a wide-character version of sprintf_s; the pointer arguments * to swprintf_s are wide-character strings. Detection of encoding errors in * swprintf_s may differ from that in sprintf_s. * * * strDest Storage location for the output. * destMax Maximum number of characters to store. * format Format-control string. * * * strDest is updated * * * swprintf_s returns the number of wide characters stored in strDest, not * counting the terminating null wide character. * The number of characters written, or -1 if an error occurred. If strDest * or format is a null pointer, swprintf_s return -1. ******************************************************************************* */ int swprintf_s (wchar_t* strDest, size_t destMax, const wchar_t* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vswprintf_s(strDest, destMax, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/swscanf_s.c000066400000000000000000000040471314037446600224610ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: swscanf_s.c * History: * 1. 
Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * swscanf_s * * * int swscanf_s (const wchar_t* buffer, const wchar_t* format, ...) * * * The swscanf_s function reads data from buffer into the location given by * each argument. Every argument must be a pointer to a variable with a type * that corresponds to a type specifier in format. The format argument controls * the interpretation of the input fields and has the same form and function * as the format argument for the scanf function. If copying takes place between * strings that overlap, the behavior is undefined. * * * buffer Stored data. * format Format control string, see Format Specifications. * ... Optional arguments. * * * ... the converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; The return value does not include fields that were read but not * assigned. * A return value of 0 indicates that no fields were assigned. * The return value is SCANF_EINVAL(-1) for an error or if the end of the string * is reached before the first conversion. ******************************************************************************* */ int swscanf_s(const wchar_t* buffer, const wchar_t* format, ...) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vswscanf_s(buffer, format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vfscanf_s.c000066400000000000000000000051001314037446600224320ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vfscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" /******************************************************************************* * * vfscanf_s * * * int vfscanf_s (FILE* stream, const char* format, va_list arglist) * * * The vfscanf_s function reads data from the current position of stream into * the locations given by argument (if any). Each argument must be a pointer * to a variable of a type that corresponds to a type specifier in format. * format controls the interpretation of the input fields and has the same * form and function as the format argument for scanf. * * * stream Pointer to FILE structure. * format Format control string, see Format Specifications. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * If an error occurs, or if the end of the file stream is reached before the * first conversion, the return value is SCANF_EINVAL(-1). 
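*
* Illustrative usage sketch (added note, not part of the original source;
* the wrapper name my_fscanf and its caller-supplied stream are hypothetical,
* and stdarg.h is assumed to be included):
*
*     int my_fscanf(FILE* fp, const char* fmt, ...)
*     {
*         va_list ap;
*         int n;
*         va_start(ap, fmt);
*         n = vfscanf_s(fp, fmt, ap);
*         va_end(ap);
*         return n;
*     }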
******************************************************************************* */ int vfscanf_s(FILE* stream, const char* format, va_list arglist) { int retval = 0; SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; if ((stream == NULL) || (format == NULL) ) { SECUREC_ERROR_INVALID_PARAMTER("vfscanf_s"); return SCANF_EINVAL; } SECUREC_LOCK_FILE(stream); _VALIDATE_STREAM_ANSI_SETRET(stream, EINVAL, retval, EOF); fStr.pf = stream; fStr._flag = FILE_STREAM_FLAG; fStr.oriFilePos = UNINITIALIZED_FILE_POS; retval = securec_input_s(&fStr, format, arglist); SECUREC_UNLOCK_FILE(stream); if (retval < 0) { /*MaskCoverityID31811*/ /*coverity[RESOURCE_LEAK]*/ SECUREC_ERROR_INVALID_PARAMTER("vfscanf_s"); /* coverity[leaked_storage] */ return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vfwscanf_s.c000066400000000000000000000051731314037446600226330ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vfwscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" /******************************************************************************* * * vfwscanf_s * * * int vfwscanf_s (FILE* stream, const wchar_t* format, va_list arglist) * * * The vfwscanf_s function reads data from the current position of stream into * the locations given by argument (if any). Each argument must be a pointer * to a variable of a type that corresponds to a type specifier in format. * format controls the interpretation of the input fields and has the same form * and function as the format argument for scanf; see scanf for a description * of format. * * * stream Pointer to FILE structure. * format Format control string, see Format Specifications. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * If an error occurs, or if the end of the file stream is reached before the * first conversion, the return value is SCANF_EINVAL(-1). ******************************************************************************* */ int vfwscanf_s(FILE* stream, const wchar_t* format, va_list arglist) { int retval = 0; SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; if ((stream == NULL) || (format == NULL) ) { SECUREC_ERROR_INVALID_PARAMTER("vfwscanf_s"); return SCANF_EINVAL; } SECUREC_LOCK_FILE(stream); _VALIDATE_STREAM_ANSI_SETRET(stream, EINVAL, retval, EOF); fStr.pf = stream; fStr._flag = FILE_STREAM_FLAG; fStr.oriFilePos = UNINITIALIZED_FILE_POS; retval = securec_winput_s(&fStr, format, arglist); SECUREC_UNLOCK_FILE(stream); if (retval < 0) { /*MaskCoverityID13633*/ /*coverity[RESOURCE_LEAK]*/ SECUREC_ERROR_INVALID_PARAMTER("vfwscanf_s"); /* coverity[leaked_storage] */ return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vscanf_s.c000066400000000000000000000054661314037446600223030ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vscanf_s.c * History: * 1. 
Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" /******************************************************************************* * * vscanf_s * * * int vscanf_s(const char* format, va_list arglist) * * * The vscanf_s function reads data from the standard input stream stdin and * writes the data into the location that's given by argument. Each argument * must be a pointer to a variable of a type that corresponds to a type specifier * in format. If copying occurs between strings that overlap, the behavior is * undefined. * * * format Format control string. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Returns the number of fields successfully converted and assigned; * the return value does not include fields that were read but not assigned. * A return value of 0 indicates that no fields were assigned. * The return value is EOF for an error, * or if the end-of-file character or the end-of-string character is encountered * in the first attempt to read a character. * If format is a NULL pointer or retrun value is less than zero, the invalid * parameter handler is invoked, as described in Parameter Validation. If * execution is allowed to continue, scanf_s and wscanf_s return SCANF_EINVAL(-1). ******************************************************************************* */ int vscanf_s(const char* format, va_list arglist) { int retval = -1; SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; /* "va_list" has different definition on different platform, so we can't use arglist == NULL to determine it's invalid. If you has fixed platform, you can check some fields to validate it, such as "arglist == NULL" or arglist.xxx != NULL or *(size_t*)&arglist != 0. */ if (format == NULL) { SECUREC_ERROR_INVALID_PARAMTER("vscanf_s"); return SCANF_EINVAL; } fStr.pf = stdin; fStr._flag = FROM_STDIN_FLAG; SECUREC_LOCK_STDIN(0, stdin); retval = securec_input_s(&fStr, format, arglist); SECUREC_UNLOCK_STDIN(0, stdin); if (retval < 0) { /*MaskCoverityID31812*/ /*coverity[RESOURCE_LEAK]*/ SECUREC_ERROR_INVALID_PARAMTER("vscanf_s"); /* coverity[leaked_storage] */ return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vsnprintf_s.c000066400000000000000000000073331314037446600230470ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vsnprintf.c * History: * 1. Date: 2014/5/20 * Author: LiShunda * Modification: change #include "securectype.h" to #include "securec.h", which * will cause link error under C++ compiler. ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * vsnprintf_s * * * int vsnprintf_s(char* strDest, size_t destMax, size_t count, const char* format, valist ap); * * * The vsnprintf_s function takes a pointer to an argument list, then formats * and writes up to count characters of the given data to the memory pointed * to by strDest and appends a terminating null. * * * strDest Storage location for the output. * destMax The size of the strDest for output. * count Maximum number of character to write(not including * the terminating NULL) * format Format-control string. 
* ap pointer to list of arguments. * * * strDest is updated * * * vsnprintf_s returns the number of characters written, not including the * terminating null, or a negative value if an output error occurs. vsnprintf_s * is included for compliance to the ANSI standard. * If the storage required to store the data and a terminating null exceeds * destMax, the function set strDest to an empty strDest, and return -1. * If strDest or format is a NULL pointer, or if count is less than or equal * to zero, the function return -1. * * ERROR CONDITIONS: * Condition Return * strDest is NULL -1 * format is NULL -1 * count <= 0 -1 * destMax too small -1(and strDest set to an empty string) ******************************************************************************* */ int vsnprintf_s (char* strDest, size_t destMax, size_t count, const char* format, va_list arglist) { int retvalue = -1; if (format == NULL || strDest == NULL || destMax == 0 || destMax > SECUREC_STRING_MAX_LEN || (count > (SECUREC_STRING_MAX_LEN - 1) && count != (size_t)-1)) { if (strDest != NULL && destMax > 0) { strDest[0] = '\0'; } SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_s"); return -1; } if (destMax > count) { retvalue = vsnprintf_helper(strDest, count + 1, format, arglist); if (retvalue == -2) /* lsd add to keep dest buffer not destroyed 2014.2.18 */ { /* the string has been truncated, return -1 */ return -1; /* to skip error handler, return strlen(strDest) or -1 */ } } else /* destMax <= count */ { retvalue = vsnprintf_helper(strDest, destMax, format, arglist); strDest[destMax - 1] = 0; #ifdef COMPATIBLE_WIN_FORMAT if (retvalue == -2 && count == (size_t)-1) { return -1; } #endif } if (retvalue < 0) { strDest[0] = '\0'; /* empty the dest strDest */ if (retvalue == -2) { /* Buffer too small */ SECUREC_ERROR_INVALID_RANGE("vsnprintf_s"); } SECUREC_ERROR_INVALID_PARAMTER("vsnprintf_s"); return -1; } return retvalue; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vsprintf_s.c000066400000000000000000000045521314037446600226710ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vsprintf.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include /******************************************************************************* * * vsprintf_s * * * int vsprintf_s(char* strDest, size_t destMax, const char* format, valist ap); * * * The vsprintf_s function takes a pointer to an argument list, and then formats * and writes the given data to the memory pointed to by strDest. * The function differ from the non-secure versions only in that the secure * versions support positional parameters. * * * strDest Storage location for the output. * destMax Size of strDest * format Format specification. * ap pointer to list of arguments * * * strDest is updated * * * vsprintf_s returns the number of characters written, not including the * terminating null character, or a negative value if an output error occurs. * If strDest or format is a null pointer, if count is zero, or if the format * string contains invalid formatting characters, the function return -1. 
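*
* Illustrative usage sketch (added note, not part of the original source;
* the wrapper name format_msg is hypothetical, stdarg.h assumed included):
*
*     int format_msg(char* out, size_t outMax, const char* fmt, ...)
*     {
*         va_list ap;
*         int n;
*         va_start(ap, fmt);
*         n = vsprintf_s(out, outMax, fmt, ap);
*         va_end(ap);
*         return n;
*     }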
******************************************************************************* */ int vsprintf_s (char* strDest, size_t destMax, const char* format, va_list arglist) { int retvalue = -1; if (format == NULL || strDest == NULL || destMax == 0 || destMax > SECUREC_STRING_MAX_LEN) { if (strDest != NULL && destMax > 0) { strDest[0] = '\0'; } SECUREC_ERROR_INVALID_PARAMTER("vsprintf_s"); return -1; } retvalue = vsnprintf_helper(strDest, destMax, format, arglist); if (retvalue < 0) { strDest[0] = '\0'; if (retvalue == -2) { /*Buffer is too small*/ SECUREC_ERROR_INVALID_RANGE("vsprintf_s"); } SECUREC_ERROR_INVALID_PARAMTER("vsprintf_s"); return -1; } return retvalue; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vsscanf_s.c000066400000000000000000000063411314037446600224570ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vsscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" #include #if !defined(SECUREC_SYSAPI4VXWORKS) #include #endif /******************************************************************************* * * vsscanf_s * * * int vsscanf_s(const char* buffer, const char* format, va_list arglist) * * * The vsscanf_s function reads data from buffer into the location given by * each argument. Every argument must be a pointer to a variable with a type * that corresponds to a type specifier in format. The format argument controls * the interpretation of the input fields and has the same form and function * as the format argument for the scanf function. * If copying takes place between strings that overlap, the behavior is undefined. * * * buffer Stored data * format Format control string, see Format Specifications. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * The return value is SCANF_EINVAL(-1) for an error or if the end of the * string is reached before the first conversion. 
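*
* Illustrative usage sketch (added note, not part of the original source;
* the wrapper name parse_line is hypothetical, stdarg.h assumed included;
* as elsewhere in the secure scanf family, each %s or %c destination is
* expected to be followed by its buffer size argument):
*
*     int parse_line(const char* text, const char* fmt, ...)
*     {
*         va_list ap;
*         int n;
*         va_start(ap, fmt);
*         n = vsscanf_s(text, fmt, ap);
*         va_end(ap);
*         return n;
*     }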
******************************************************************************* */ int vsscanf_s(const char* buffer, const char* format, va_list arglist) { SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; int retval = -1; size_t count = 0; /* validation section */ if (buffer == NULL || format == NULL) { SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s"); return SCANF_EINVAL; } count = strlen(buffer); if (count == 0 || count > SECUREC_STRING_MAX_LEN) { clearDestBuf(buffer,format, arglist); SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s"); return SCANF_EINVAL; } #ifdef SECUREC_VXWORKS_PLATFORM /* in vxworks platform when buffer is white string ,will set first %s argument tu zero.like following useage: sscanf(" \v\f\t\r\n","%s",str,strSize); */ if(isspace((int)buffer[0]) && isspace((int)buffer[count - 1])) /* do not check all character ,just first and last character then consider it is white string */ { clearDestBuf(buffer,format, arglist); } #endif fStr._flag = MEM_STR_FLAG; fStr._base = fStr._ptr = (char*) buffer; fStr._cnt = (int)count; /* coverity[var_deref_model] */ retval = securec_input_s(&fStr, format, arglist); if (retval < 0 ) { SECUREC_ERROR_INVALID_PARAMTER("vsscanf_s"); return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vswprintf_s.c000066400000000000000000000065231314037446600230600ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vswprintf.c * History: * 1. Date: 2014/4/10 * Author: LiShunda * Modification: move vswprintf_helper() function from vswprintf.c to this file, * which will facilitate the UNICODE string API building. ******************************************************************************** */ #include "securec.h" #include "securecutil.h" #include #include "secureprintoutput.h" /******************************************************************************* * * vswprintf_s * * * int vswprintf_s(wchar_t* strDest, size_t destMax, const wchar_t* format, valist ap); * * * The vswprintf_s function is a wide-character version of vsprintf_s. * The function takes a pointer to an argument list, and then formats and * writes the given data to the memory pointed to by strDest. * The function differ from the non-secure versions only in that the secure * versions support positional parameters. * * * strDest Storage location for the output. * destMax Size of strDest * format Format specification. * ap pointer to list of arguments * * * strDest is updated * * * vswprintf_s returns the number of characters written, not including the * terminating null character, or a negative value if an output error occurs. * If strDest or format is a null pointer, if count is zero, or if the format * string contains invalid formatting characters, the function return -1. 
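*
* Illustrative usage sketch (added note, not part of the original source;
* the wrapper name format_wmsg is hypothetical, stdarg.h assumed included;
* note that destMax is counted in wide characters):
*
*     int format_wmsg(wchar_t* out, size_t outMax, const wchar_t* fmt, ...)
*     {
*         va_list ap;
*         int n;
*         va_start(ap, fmt);
*         n = vswprintf_s(out, outMax, fmt, ap);
*         va_end(ap);
*         return n;
*     }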
******************************************************************************* */ extern int putWcharStrEndingZero(SECUREC_XPRINTF_STREAM* str, int zerosNum); int vswprintf_helper (wchar_t* string, size_t sizeInWchar, const wchar_t* format, va_list arglist) { SECUREC_XPRINTF_STREAM str; int retval; str._ptr = (char*) string; str._cnt = (int)(sizeInWchar * WCHAR_SIZE); retval = securec_woutput_s(&str, format, arglist ); //lint -esym(526, putWcharStrEndingZero*) if ((retval >= 0) && putWcharStrEndingZero(&str, WCHAR_SIZE) ) { return (retval); } if (string != NULL) { string[sizeInWchar - 1] = 0; } if (str._cnt < 0) { /* the buffer was too small; we return -2 to indicate truncation */ return -2; } return -1; } int vswprintf_s (wchar_t* strDest, size_t destMax, const wchar_t* format, va_list ap) { int retvalue = -1; if (format == NULL || strDest == NULL || destMax == 0 || destMax > (SECUREC_WCHAR_STRING_MAX_LEN)) { if (strDest != NULL && destMax > 0) { strDest[0] = '\0'; } SECUREC_ERROR_INVALID_PARAMTER("vswprintf_s"); return -1; } retvalue = vswprintf_helper(strDest, destMax, format, ap); if (retvalue < 0) { strDest[0] = '\0'; if (retvalue == -2) { /*Buffer too small*/ SECUREC_ERROR_INVALID_RANGE("vswprintf_s"); } SECUREC_ERROR_INVALID_PARAMTER("vswprintf_s"); return -1; } return retvalue; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vswscanf_s.c000066400000000000000000000057501314037446600226510ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vswscanf_s.c * History: * 1. Date: 2014/6/8 * Author: LiShunda * Modification: add mywcslen function to avoid include . ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" #include /******************************************************************************* * * vswscanf_s * * * int vswscanf_s(const wchar_t* buffer, const wchar_t* format, va_list arglist) * * * The vsscanf_s function reads data from buffer into the location given by * each argument. Every argument must be a pointer to a variable with a type * that corresponds to a type specifier in format. * The format argument controls the interpretation of the input fields and * has the same form and function as the format argument for the scanf function. * If copying takes place between strings that overlap, the behavior is undefined. * * * buffer Stored data * format Format control string, see Format Specifications. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Each of these functions returns the number of fields successfully converted * and assigned; the return value does not include fields that were read but * not assigned. A return value of 0 indicates that no fields were assigned. * The return value is SCANF_EINVAL(-1) for an error or if the end of the * string is reached before the first conversion. 
******************************************************************************* */ static size_t mywcslen(const wchar_t *wcs) { const wchar_t* eos = wcs; while( *eos++ ) ; return( (size_t)(eos - wcs - 1) ); } int vswscanf_s(const wchar_t* buffer, const wchar_t* format, va_list arglist) { SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; int retval = -1; size_t count; /* validation section */ if (buffer == NULL || format == NULL) { SECUREC_ERROR_INVALID_PARAMTER("vswscanf_s"); return SCANF_EINVAL; } count = mywcslen(buffer); if (count == 0 || count > SECUREC_WCHAR_STRING_MAX_LEN ) { clearwDestBuf(buffer,format, arglist); SECUREC_ERROR_INVALID_PARAMTER("vswscanf_s"); return SCANF_EINVAL; } fStr._flag = MEM_STR_FLAG; fStr._base = fStr._ptr = (char*) buffer; fStr._cnt = (int)count * ((int)WCHAR_SIZE); /* coverity[var_deref_model] */ retval = securec_winput_s(&fStr, format, arglist); if (retval < 0 ) { SECUREC_ERROR_INVALID_PARAMTER("vswscanf_s"); return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/vwscanf_s.c000066400000000000000000000050441314037446600224620ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: vwscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" #include "securecutil.h" /******************************************************************************* * * vwscanf_s * * * int vwscanf_s(const wchar_t* format, va_list arglist) * * * The vwscanf_s function is the wide-character version of vscanf_s. The * function reads data from the standard input stream stdin and writes the * data into the location that's given by argument. Each argument must be a * pointer to a variable of a type that corresponds to a type specifier in * format. If copying occurs between strings that overlap, the behavior is * undefined. * * * format Format control string. * arglist pointer to list of arguments * * * arglist the converted value stored in user assigned address * * * Returns the number of fields successfully converted and assigned; * the return value does not include fields that were read but not assigned. * A return value of 0 indicates that no fields were assigned. * The return value is EOF for an error, * or if the end-of-file character or the end-of-string character is encountered * in the first attempt to read a character. * If format is a NULL pointer, the invalid parameter handler is invoked, * as described in Parameter Validation. If execution is allowed to continue, * scanf_s and wscanf_s return SCANF_EINVAL(-1). 
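*
* Illustrative usage sketch (added note, not part of the original source;
* the wrapper name read_wide is hypothetical, stdarg.h assumed included):
*
*     int read_wide(const wchar_t* fmt, ...)
*     {
*         va_list ap;
*         int n;
*         va_start(ap, fmt);
*         n = vwscanf_s(fmt, ap);
*         va_end(ap);
*         return n;
*     }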
******************************************************************************* */ int vwscanf_s(const wchar_t* format, va_list arglist) { int retval = -1; SEC_FILE_STREAM fStr = INIT_SEC_FILE_STREAM; if (format == NULL) { SECUREC_ERROR_INVALID_PARAMTER("vwscanf_s"); return SCANF_EINVAL; } fStr.pf = stdin; fStr._flag = FROM_STDIN_FLAG; SECUREC_LOCK_STDIN(0, stdin); retval = securec_winput_s(&fStr, format, arglist); SECUREC_UNLOCK_STDIN(0, stdin); if (retval < 0) { /* coverity[leaked_storage] */ SECUREC_ERROR_INVALID_PARAMTER("vwscanf_s"); return SCANF_EINVAL; } return retval; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wcscat_s.c000066400000000000000000000106011314037446600222720ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wcscat_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wcscat_s * * * errno_t wcscat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc); * * * The wcscat_s function is the wide-character and multibyte-character * versions of strcat_s. * The arguments and return value of wcscat_s are wide-character strings. * * The wcscat_s function appends strSrc to strDest and terminates the resulting * string with a null character. The initial character of strSrc overwrites the * terminating null character of strDest. wcscat_s will return EOVERLAP_AND_RESET if the * source and destination strings overlap. * * Note that the second parameter is the total size of the buffer, not the * remaining size. * * * strDest Null-terminated destination string buffer. * destMax Size of the destination string buffer. * strSrc Null-terminated source string buffer. * * * strDest is updated * * * EOK Successful operation * EINVAL strDest not terminated or NULL, strSrc is NULL * ERANGE destMax is 0, too big or too small * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped ******************************************************************************* */ errno_t wcscat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc) { wchar_t* pHeader = strDest; size_t availableSize = destMax; IN_REGISTER const wchar_t* overlapGuard = NULL; if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("wcscat_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("wcscat_s"); if (strDest != NULL ) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } if (strDest < strSrc) { overlapGuard = strSrc; while (availableSize > 0 && *strDest != '\0') { if (strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcscat_s"); return EOVERLAP_AND_RESET; } /*seek to string end*/ strDest++; availableSize--; } /* strDest unterminated, return error. 
*/ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("wcscat_s"); return EINVAL_AND_RESET; } /* if available > 0, then excute the strcat operation */ while ((*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcscat_s"); return EOVERLAP_AND_RESET; } } } else { overlapGuard = strDest; while (availableSize > 0 && *strDest != '\0') { /*seek to string end, and no need to check overlap*/ strDest++; availableSize--; } /* strDest unterminated, return error. */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("wcscat_s"); return EINVAL_AND_RESET; } while ((*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcscat_s"); return EOVERLAP_AND_RESET; } } } /* strDest have not enough space, return error */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("wcscat_s"); return ERANGE_AND_RESET; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wcscpy_s.c000066400000000000000000000062021314037446600223200ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wcscpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wcscpy_s * * * errno_t wcscpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc); * * * wcscpy_s is wide-character version of strcpy_s. * * strDest Location of destination string buffer * destMax Size of the destination string buffer. * strSrc Null-terminated source string buffer. * * * strDest is updated. * * * 0 success * EINVAL strDest == NULL or strSrc == NULL * ERANGE destination buffer is NOT enough, or size of * buffer is zero or greater than SECUREC_STRING_MAX_LEN * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped * * If there is a runtime-constraint violation, then if strDest is not a null * pointer and destMax is greater than zero and not greater than * SECUREC_WCHAR_STRING_MAX_LEN, then wcscpy_s sets strDest[0] to the null * character. 
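*
* Illustrative usage sketch (added note, not part of the original source;
* the buffer "wdest" and the literal are hypothetical; destMax is counted
* in wide characters, not bytes):
*
*     wchar_t wdest[16];
*     errno_t rc = wcscpy_s(wdest, sizeof(wdest) / sizeof(wdest[0]), L"hello");
*
* rc is EOK on success; on error the function returns a non-zero code and,
* where possible, resets wdest[0] to the null wide character.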
******************************************************************************* */ errno_t wcscpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc) { wchar_t* pHeader = strDest; size_t maxSize = destMax; IN_REGISTER const wchar_t* overlapGuard = NULL; if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN) { SECUREC_ERROR_INVALID_RANGE("wcscpy_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("wcscpy_s"); if (strDest != NULL) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } if (strDest == strSrc) { return EOK; } if (strDest < strSrc) { overlapGuard = strSrc; while ((*(strDest++) = *(strSrc++)) != '\0' && --maxSize > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcpy_s"); return EOVERLAP_AND_RESET; } } } else { overlapGuard = strDest; while ((*(strDest++) = *(strSrc++)) != '\0' && --maxSize > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("strcpy_s"); return EOVERLAP_AND_RESET; } } } if (maxSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("wcscpy_s"); return ERANGE; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wcsncat_s.c000066400000000000000000000117521314037446600224600ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wcsncat_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wcsncat_s * * * errno_t wcsncat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count); * * * The wcsncat_s function is wide-character and multibyte-character versions * of strncat_s. The string arguments and return value of wcsncat_s are * wide-character strings; * * The wcsncat_s function try to append the first D characters of strSrc to * the end of strDest, where D is the lesser of count and the length of strSrc. * If appending those D characters will fit within strDest (whose size is * given as destMax) and still leave room for a null terminator, then those * characters are appended, starting at the original terminating null of * strDest, and a new terminating null is appended; otherwise, strDest[0] is * set to the null character. * * * strDest Null-terminated destination string. * destMax Size of the destination buffer. * strSrc Null-terminated source string. * count Number of character to append, or truncate. 
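* 
* Usage sketch (editor's addition, not part of the original source): append at
* most 3 characters of the source while leaving room for the terminator.
* 
*     wchar_t buf[8] = L"ab";
*     if (wcsncat_s(buf, sizeof(buf) / sizeof(buf[0]), L"cdefgh", 3) == EOK) {
*         /* buf now holds L"abcde" */
*     }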
* * * strDest is updated * * * EOK Successful operation * EINVAL strDest not terminated or NULL, or strSrc is NULL * ERANGE destMax is 0, too big or too small, count > * SECUREC_WCHAR_STRING_MAX_LEN * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped ******************************************************************************* */ errno_t wcsncat_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count) { wchar_t* pHeader = strDest; size_t availableSize = destMax; IN_REGISTER const wchar_t* overlapGuard = NULL; if (destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("wcsncat_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL) { SECUREC_ERROR_INVALID_PARAMTER("wcsncat_s"); if (strDest != NULL) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } #ifdef COMPATIBLE_WIN_FORMAT if (count > SECUREC_WCHAR_STRING_MAX_LEN && count !=(size_t)-1) #else if (count > SECUREC_WCHAR_STRING_MAX_LEN) #endif { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("wcsncat_s"); return ERANGE_AND_RESET; } if (strDest < strSrc) { overlapGuard = strSrc; while (availableSize > 0 && *strDest != '\0') { if (strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcsncat_s"); return EOVERLAP_AND_RESET; } strDest++; availableSize--; } /* strDestination unterminated, return error. */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("wcsncat_s"); return EINVAL_AND_RESET; } while (count > 0 && (*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcsncat_s"); return EOVERLAP_AND_RESET; } count--; } } else { overlapGuard = strDest; while (availableSize > 0 && *strDest != '\0') { /*seek to string end, and no need to check overlap*/ strDest++; availableSize--; } /* strDest unterminated, return error. */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_PARAMTER("wcsncat_s"); return EINVAL_AND_RESET; } while (count > 0 && (*strDest++ = *strSrc++) != '\0' && --availableSize > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcsncat_s"); return EOVERLAP_AND_RESET; } count--; } } if (count == 0) { *strDest = '\0'; /* add terminator to strDest*/ } /* strDest have no space to store the terminator, return error */ if (availableSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("wcsncat_s"); return ERANGE_AND_RESET; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wcsncpy_s.c000066400000000000000000000074161314037446600225060ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wcsncpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wcsncpy_s * * * errno_t wcsncpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count); * * * wcsncpy_s is the wide-character version of strncpy_s. the wcsncpy_s function * copy the first D characters of strSrc to strDest, where D is the lesser of * count and the length of strSrc. * * * strDest Destination string. * destMax The size of the destination string, in characters. * strSrc Source string. * count Number of characters to be copied. 
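* 
* Usage sketch (editor's addition, not part of the original source): copy at
* most 4 characters plus the terminating null.
* 
*     wchar_t dst[8];
*     if (wcsncpy_s(dst, sizeof(dst) / sizeof(dst[0]), L"abcdef", 4) == EOK) {
*         /* dst now holds L"abcd" */
*     }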
* * * strDest is updated * * * EOK(0) success * EINVAL strDest == NULL or strSrc == NULL * ERANGE destMax is zero or greater than SECUREC_WCHAR_STRING_MAX_LEN, * or count > SECUREC_WCHAR_STRING_MAX_LEN, or destMax * is too small * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped * * If there is a runtime-constraint violation, then if strDest is not a null * pointer and destMax is greater than zero and not greater than * SECUREC_WCHAR_STRING_MAX_LEN, then wcsncpy_s sets strDest[0] to the null * character. ******************************************************************************* */ errno_t wcsncpy_s(wchar_t* strDest, size_t destMax, const wchar_t* strSrc, size_t count) { wchar_t* pHeader = strDest; size_t maxSize = destMax; IN_REGISTER const wchar_t* overlapGuard = NULL; if ( destMax == 0 || destMax > SECUREC_WCHAR_STRING_MAX_LEN ) { SECUREC_ERROR_INVALID_RANGE("wcsncpy_s"); return ERANGE; } if (strDest == NULL || strSrc == NULL ) { SECUREC_ERROR_INVALID_PARAMTER("wcsncpy_s"); if (strDest != NULL) { pHeader[0] = '\0'; return EINVAL_AND_RESET; } return EINVAL; } #ifdef COMPATIBLE_WIN_FORMAT if (count > SECUREC_WCHAR_STRING_MAX_LEN && count !=(size_t)-1) #else if (count > SECUREC_WCHAR_STRING_MAX_LEN) #endif { pHeader[0] = '\0'; /*clear dest string*/ SECUREC_ERROR_INVALID_RANGE("wcsncpy_s"); return ERANGE_AND_RESET; } if (count == 0) { pHeader[0] = '\0'; return EOK; } if (strDest < strSrc) { overlapGuard = strSrc; while ((*(strDest++) = *(strSrc++)) != '\0' && --maxSize > 0 && --count > 0) { if ( strDest == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcsncpy_s"); return EOVERLAP_AND_RESET; } } } else { overlapGuard = strDest; while ((*(strDest++) = *(strSrc++)) != '\0' && --maxSize > 0 && --count > 0) { if ( strSrc == overlapGuard) { pHeader[0] = '\0'; SECUREC_ERROR_BUFFER_OVERLAP("wcsncpy_s"); return EOVERLAP_AND_RESET; } } } if (count == 0) { *strDest = '\0'; } if (maxSize == 0) { pHeader[0] = '\0'; SECUREC_ERROR_INVALID_RANGE("wcsncpy_s"); return ERANGE_AND_RESET; } return EOK; } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wcstok_s.c000066400000000000000000000055111314037446600223240ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wcstok_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" /******************************************************************************* * * wcstok_s * * * wchar_t * wcstok_s(wchar_t* strToken, const wchar_t* strDelimit, wchar_t** context); * * * wcstok_s is the wide-character versions of strtok_s. * * * strToken String containing token or tokens. * strDelimit Set of delimiter characters. * context Used to store position information between calls to * strtok_s. * * * * * Returns a pointer to the next token found in strToken. * They return NULL when no more tokens are found. * Each call modifies strToken by substituting a NULL character for the first * delimiter that occurs after the returned token. * * return value condition * NULL context == NULL, strDelimit == NULL, strToken == NULL * && (*context) == NULL, or no token is found. 
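* 
* Usage sketch (editor's addition, not part of the original source): iterate
* over the tokens of a writable wide string, letting context keep the
* position between calls.
* 
*     wchar_t str[] = L"a,b;c";
*     wchar_t *ctx = NULL;
*     wchar_t *tok = wcstok_s(str, L",;", &ctx);
*     while (tok != NULL) {
*         /* tok points to L"a", then L"b", then L"c" */
*         tok = wcstok_s(NULL, L",;", &ctx);
*     }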
******************************************************************************* */ wchar_t* wcstok_s(wchar_t* strToken, const wchar_t* strDelimit, wchar_t** context) { wchar_t* token = NULL; const wchar_t* ctl = NULL; /* validation section */ if (context == NULL || strDelimit == NULL) { return NULL; } if (strToken == NULL && (*context) == NULL) { return NULL; } /* If string==NULL, continue with previous string */ if (!strToken) { strToken = *context; } /* Find beginning of token (skip over leading delimiters). Note that * there is no token if this loop sets string to point to the terminal null. */ for ( ; *strToken != 0 ; strToken++) { for (ctl = strDelimit; *ctl != 0 && *ctl != *strToken; ctl++) ; if (*ctl == 0) { break; } } token = strToken; /* Find the end of the token. If it is not the end of the string, * put a null there. */ for ( ; *strToken != 0 ; strToken++) { for (ctl = strDelimit; *ctl != 0 && *ctl != *strToken; ctl++) ; if (*ctl != 0) { *strToken++ = 0; break; } } /* Update the context */ *context = strToken; /* Determine if a token has been found. */ if (token == strToken) { return NULL; } else { return token; } } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wmemcpy_s.c000066400000000000000000000041651314037446600224770ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wmemcpy_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wmemcpy_s * * * errno_t wmemcpy_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count); * * * wmemcpy_s copies count wide character(two bytes) from src to dest. * * * dest new buffer. * destMax Size of the destination buffer. * src Buffer to copy from. * count Number of characters to copy. * * * dest buffer is uptdated. * * * 0 Success * EINVAL dest == NULL or src == NULL * ERANGE count > destMax or destMax > SECUREC_WCHAR_MEM_MAX_LEN * or destMax == 0 * EOVERLAP_AND_RESET dest buffer and source buffer are overlapped * * if an error occured, dest will be filled with 0. * If the source and destination overlap, the behavior of wmemcpy_s is undefined. * Use wmemmove_s to handle overlapping regions. ******************************************************************************* */ errno_t wmemcpy_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count) { if (destMax == 0 || destMax > SECUREC_WCHAR_MEM_MAX_LEN ) { SECUREC_ERROR_INVALID_PARAMTER("wmemcpy_s"); return ERANGE; } if (count > destMax) { SECUREC_ERROR_INVALID_PARAMTER("wmemcpy_s"); if (dest != NULL) { (void)memset(dest, 0, destMax * WCHAR_SIZE); return ERANGE_AND_RESET; } return ERANGE; } return memcpy_s(dest, destMax * WCHAR_SIZE, src, count * WCHAR_SIZE); } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wmemmove_s.c000066400000000000000000000042271314037446600226510ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wmemmove_s.c * History: * 1. 
Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "securecutil.h" /******************************************************************************* * * wmemmove_s * * * errno_t wmemmove_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count); * * * wmemmove_s copies count wide character(two bytes) from src to dest * * * dest destination object. * destMax Size of the destination buffer. * src Source object. * count Number of bytes or character to copy. * * * dest is updated. * * * EOK success * EINVAL dest == NULL or src == NULL * ERANGE count > destMax or destMax > SECUREC_WCHAR_MEM_MAX_LEN * or destMax == 0 * * If an error occured, dest will NOT be filled with 0. * If some regions of the source area and the destination overlap, wmemmove_s * ensures that the original source bytes in the overlapping region are copied * before being overwritten ******************************************************************************** */ errno_t wmemmove_s(wchar_t* dest, size_t destMax, const wchar_t* src, size_t count) { if (destMax == 0 || destMax > SECUREC_WCHAR_MEM_MAX_LEN ) { SECUREC_ERROR_INVALID_PARAMTER("wmemmove_s"); return ERANGE; } if ( count > destMax) { SECUREC_ERROR_INVALID_PARAMTER("wmemmove_s"); if (dest != NULL) { (void)memset(dest, 0, destMax * WCHAR_SIZE); return ERANGE_AND_RESET; } return ERANGE; } return memmove_s(dest, destMax * WCHAR_SIZE, src, count * WCHAR_SIZE); } UVP-Tools-2.2.0.316/uvp-monitor/securec/src/wscanf_s.c000066400000000000000000000040311314037446600222670ustar00rootroot00000000000000/******************************************************************************* * Copyright @ Huawei Technologies Co., Ltd. 1998-2014. All rights reserved. * File name: wscanf_s.c * History: * 1. Date: * Author: * Modification: ******************************************************************************** */ #include "securec.h" #include "secinput.h" /******************************************************************************* * * wscanf_s * * * int wscanf_s (const wchar_t* format, ...); * * * The wscanf_s function reads data from the standard input stream stdin and * writes the data into the location that's given by argument. Each argument * must be a pointer to a variable of a type that corresponds to a type specifier * in format. If copying occurs between strings that overlap, the behavior is * undefined. * * * format Format control string. * ... Optional arguments. * * * ... the converted value stored in user assigned address * * * Returns the number of fields successfully converted and assigned; * the return value does not include fields that were read but not assigned. * A return value of 0 indicates that no fields were assigned. * The return value is EOF for an error, * or if the end-of-file character or the end-of-string character is encountered * in the first attempt to read a character. * If format is a NULL pointer, the invalid parameter handler is invoked, * as described in Parameter Validation. If execution is allowed to continue, * scanf_s and wscanf_s return SCANF_EINVAL(-1). ******************************************************************************* */ int wscanf_s(const wchar_t* format, ... 
) { int ret = 0; va_list arglist; va_start(arglist, format); ret = vwscanf_s( format, arglist); va_end(arglist); return ret; } UVP-Tools-2.2.0.316/uvp-monitor/xenctlmon.c000066400000000000000000001772021314037446600202660ustar00rootroot00000000000000/* * Main process * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "libxenctl.h" #include "xenstore_common.h" #include "public_common.h" #include #include #include #include "qlist.h" #include #include #include "securec.h" #include #include #include "uvpmon.h" #include #include #include #include #include #include #include #ifdef FIFREEZE #undef FIFREEZE #endif #define FIFREEZE _IOWR('X', 119, int) /* Freeze */ #ifdef FITHAW #undef FITHAW #endif #define FITHAW _IOWR('X', 120, int) /* Thaw */ /* * PV OPS kernel */ #define KERNEL_PV_OPS "control/uvp/pvops" #define UVP_UNPLUG_DISK "control/uvp/unplug-disk" #define CUR_PVDRIVER_VERSION "control/uvp/pvdriver-version" /*Ǩɵı־λ*/ #define COMPLETE_RESTORE "control/uvp/completerestore-flag" /*Ǩƿʼı־λ*/ #define MIGRATE_FLAG "control/uvp/migrate_flag" #define HIBERNATE_MIGRATE_PATH "/etc/.uvp-monitor/hibernate_migrate_flag.ini" /*resumeɵı־λ*/ #define DRIVER_RESUME_FLAG "control/uvp/driver-resume-flag" #define SYNC_TIME_FLAG "control/uvp/clock/mode" #define TIME_BUFFER 100 /*VSA*/ #define PYTHON_PATH "/opt/galax/vsa/vsaApi/vsa/service/router/service/allintaprping.py" #define EXEC_PYTHON_PATH "python /opt/galax/vsa/vsaApi/vsa/service/router/service/allintaprping.py &" /*Nanoseconds*/ #define NANOTOMICRO 1000000 #define MICROTOSEC 1000 #define MILLITOMICRO 1000 #define TIME_DIFF 1 bool hibernate_migrate_flag = 0; /* ѯ汾ϢȲϢļֵ */ #define PVDRIVER_STATIC_INFO_PATH "control/uvp/static-info-flag" //libvirt Ӧ ipv6 ֵ #define NETINFO_FEATURE_PATH "control/uvp/netinfo_feature" #define BUFFER_SIZE 1024 #define SHELL_BUFFER 256 #define MAX_COMMAND_LENGTH 128 #define VER_SIZE 16 #define LOG_LEVEL_SIZE 8 #define DEFAULT_VERSION "error" #define DEFAULT_PATH "/etc/.uvp-monitor/version.ini" /* һԿ */ #define STORAGE_SNAPSHOT_FLAG "control/uvp/storage_snapshot_flag" #define IOMIRROR_SNAPSHOT_FLAG "control/uvp/iomirror_snapshot_flag" #define THAW "thaw" #define FREEZE "freeze" #define SHELL_PATH 
"/usr/bin/userFreeze.sh" #define DEV_TYPE_NUM 5 #define SUSE_11_SP1 "2.6.32.12-0.7" #define SUSE_11_SP2 "3.0.13-0.27" const char *devtype[] = {"ext3", "ext4", "reiserfs", "jfs", "xfs"}; int gfreezeflag = 0; /*ִOSڲ̶·µĿִļ*/ #define OS_CMD_XS_PATH "control/uvp/command" #define CMD_RESULT_XS_PATH "control/uvp/command_result" #define GUEST_CMD_FILE_PATH "/etc/.uvp-monitor" #define CMD_RESULT_BUF_LEN 5 #define ARG_CHECK_OK 1 #define ARG_ERROR 2 #define FILENAME_ERROR 3 #define CHECKSUM_ERROR 4 /* forkʧ */ #define ERROR_FORK -1 /* waitpidʧ */ #define ERROR_WAITPID -2 /* waitpidʧ */ #define ERROR_PARAMETER -3 #define UNEXPECTED_ERROR 10 char chret[CMD_RESULT_BUF_LEN]; char feature_str[SHELL_BUFFER]; /* VRM is based on SLES */ #define VRM_VERSION "/etc/os_version" #define SUSE_VERSION "/etc/SuSE-release" #define VRM_FLAG "control/uvp/vrm_flag" /* */ #define XS_HEART_BEAT_RATE "control/uvp/heartbeat_rate" #define XS_HEART_BEAT "control/uvp/heartbeat" #define HEART_BEAT_BUF_LEN 5 unsigned int heartbeatrate = 0; int heartbeatnum = 0; bool heartbeat_thread_exist_flag = 0; pthread_mutex_t heartbeat_mutex = PTHREAD_MUTEX_INITIALIZER; /* don't provide pv-upgrade ability to user-compiled-pv vm */ #define XS_NOT_USE_PV_UPGRADE "control/uvp/not_use_pv_upgrade" typedef struct FsMount { char* dirname; char* devtype; QTAILQ_ENTRY(FsMount) next; } FsMount; typedef QTAILQ_HEAD(FsMountList, FsMount) FsMountList; struct Freezearg { void *handle; FsMountList *mounts; }; pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; #define LOG_BUF_LEN 256 char fReboot = '0'; void sys_log(const char* process, int Level, const char *func, int line, const char *format, ...) { va_list ap; char tmpBuf[SHELL_BUFFER] = {0}; char Log[BUFFER_SIZE] = {0}; char szLevel[LOG_LEVEL_SIZE] = {0}; int ltz = 0; int utz = 0; int timezone = 0; struct timeval tv = {0}; struct tm *tm = NULL; struct tm *tm1 = NULL; time_t tt = {0}; gettimeofday(&tv, NULL); tt = tv.tv_sec; tm1 = gmtime(&tt); utz= tm1->tm_hour; tm = localtime(&tt); ltz= tm->tm_hour; timezone=ltz-utz; if (timezone < -12) { timezone += 24; } else if (timezone >= 12) { timezone -= 24; } switch(Level) { case LOG_DEBUG: { strcpy_s(szLevel, LOG_LEVEL_SIZE, "debug"); break; } case LOG_INFO: { strcpy_s(szLevel, LOG_LEVEL_SIZE, "info"); break; } case LOG_ERR: { strcpy_s(szLevel, LOG_LEVEL_SIZE, "err"); break; } default: { strcpy_s(szLevel, LOG_LEVEL_SIZE, "debug"); break; } } snprintf_s(tmpBuf, SHELL_BUFFER, SHELL_BUFFER, "%d-%02d-%02dT%02d:%02d:%02d.%ld%+03d:00|%s|%s[%d]|%s[%d]|:", 1900+tm->tm_year, tm->tm_mon+1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, tv.tv_usec, timezone, szLevel, process, getpid(), func, line); va_start(ap, format); vsprintf_s(Log, BUFFER_SIZE, format, ap); syslog(Level, "%s %s", tmpBuf, Log); va_end(ap); } /***************************************************************************** Function : write_vrm_flag Description: note vm is vrm Input : phandle -- xenstore file handle Output : None Return : None *****************************************************************************/ void write_vrm_flag(void *phandle) { if (NULL == phandle) { return; } if((0 == access(VRM_VERSION, R_OK)) && (0 == access(SUSE_VERSION, R_OK))) { INFO_LOG("This is VRM."); write_to_xenstore(phandle, VRM_FLAG, "true"); } } void set_guest_feature(void *handle) { int ret = 0; char *get_feature_cmd = "cat /etc/.uvp-monitor/GuestOSFeature |grep -w `cat /etc/.uvp-monitor/CurrentOS` | awk '{printf $2}'"; char *get_cfg_cmd = "cat /etc/.uvp-monitor/CurrentOS"; (void)memset_s(feature_str, 
SHELL_BUFFER, 0, SHELL_BUFFER); //Can't read cfg files if(0 != access("/etc/.uvp-monitor/GuestOSFeature", R_OK) || 0 != access("/etc/.uvp-monitor/CurrentOS", R_OK)) { ERR_LOG("Get Guest Feature, Can't read cfg files, errno=%d", errno); return; } ret = uvpPopen(get_cfg_cmd, feature_str, SHELL_BUFFER); if(0 != ret) { ERR_LOG("Failed to call uvpPopen 1, output=%s ret=%d.", feature_str, ret); return; } //Get name form CurrentOS error,maybe "NULL" if(0 == strlen(feature_str)) { INFO_LOG("Get name form cfg error."); return; } ret = uvpPopen(get_feature_cmd, feature_str, SHELL_BUFFER); if (0 != ret) { ERR_LOG("Failed to call uvpPopen 2, output=%s ret=%d.", feature_str, ret); return; } //grep from GuestOSFeature maybe NULL if (0 == strlen(feature_str)) { INFO_LOG("Get guest feature failed."); } else { INFO_LOG("Guest feature is %s.", feature_str); write_to_xenstore(handle, GUSET_OS_FEATURE, trim(feature_str)); } return; } void SetPvDriverVer(void *handle) { FILE *pFileVer = NULL; char CurrentPath[SHELL_BUFFER] = {0}; char c = '='; char *pstart; char tmp_version[20]; int count = 0; pFileVer = fopen(DEFAULT_PATH, "r"); if (NULL == pFileVer) { DEBUG_LOG("Open /etc/.uvp-monitor/version.ini failed, errno=%d.", errno); write_to_xenstore(handle, PVDRIVER_VERSION, DEFAULT_VERSION); return; } (void)fgets(CurrentPath, SHELL_BUFFER, pFileVer); fclose(pFileVer); pstart = strchr(CurrentPath, c); // change the PVDRIVER_VERSION in xenstore ---2010.11.19 if (NULL != pstart) { pstart[strlen(pstart) - 1] = '\0'; tmp_version[0] = '2'; tmp_version[1] = '.'; (void)strncpy_s(&tmp_version[2], sizeof(tmp_version) - 2, pstart + 1, sizeof(tmp_version) - 3); tmp_version[sizeof(tmp_version) - 1] = '\0'; pstart = tmp_version; while('\0' != *pstart) { if('.' == *pstart) { count++; if(4 == count) { *pstart = '\0'; break; } } pstart++; } pstart = tmp_version; write_to_xenstore(handle, PVDRIVER_VERSION, pstart); } else { write_to_xenstore(handle, PVDRIVER_VERSION, DEFAULT_VERSION); } } /***************************************************************************** Function : deal_hib_migrate_flag_file Description:Ǩʱ/etc/.uvp-monitor/hibernate_migrate_flag.iniд־Ϣ Input :־Ϣֵ Input : Return : None *****************************************************************************/ void deal_hib_migrate_flag_file(int hib_mig_flag) { int iRet = 0; char pszCommand[MAX_COMMAND_LENGTH] = {0}; char pszBuff[MAX_COMMAND_LENGTH] = {0}; if (1 == hib_mig_flag) { (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "mkdir -p %s 2> /dev/null;echo %d > %s 2> /dev/null;", GUEST_CMD_FILE_PATH, hib_mig_flag, HIBERNATE_MIGRATE_PATH); } else { (void)snprintf_s(pszCommand, MAX_COMMAND_LENGTH, MAX_COMMAND_LENGTH, "rm -rf %s 2> /dev/null ", HIBERNATE_MIGRATE_PATH); } iRet = uvpPopen(pszCommand, pszBuff, MAX_COMMAND_LENGTH); if (0 != iRet) { ERR_LOG("hibernate_migrate_start: write migrate_start flag fail. 
pszCommand=%s output=%s ret=%d.", pszCommand, pszBuff, iRet); } else { ERR_LOG("write migrate_start flag %d success.", hib_mig_flag); } return; } /***************************************************************************** Function : write_to_file Description:Ǩƺļ/var/log/uvp_migrate/complete_migrate.configдϢ Input : Input : Return : None *****************************************************************************/ void write_to_file() { FILE *fo; char *filename; char *str; filename = "/etc/.uvp-monitor/complete_migrate.config"; str = "The restore has been completed"; fo = fopen(filename, "w+"); if (NULL == fo) { ERR_LOG("fopen failed, errno=%d.", errno); return; } fprintf(fo, "%s\n", str); fclose(fo); } /***************************************************************************** Function : write_unplugdisk_flag Description: Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void write_unplugdisk_flag(void* phandle) { if (NULL == phandle) { return; } write_to_xenstore(phandle, UVP_UNPLUG_DISK, "uvptoken"); } /***************************************************************************** Function : uvp_regwatch Description:עwatch Input :phandle xenstore Input : service_flag true or false Return : None *****************************************************************************/ void uvp_regwatch(void *phandle) { int iIsHotplug = XEN_FAIL; if(NULL == phandle) { return; } (void)write_unplugdisk_flag(phandle); (void)regwatch(phandle, PVDRIVER_STATIC_INFO_PATH , "staticInfoToken"); (void)regwatch(phandle, COMPLETE_RESTORE, "0"); (void)regwatch(phandle, DRIVER_RESUME_FLAG, "uvptoken"); /* ΪǨƺ󴥷һܻȡ */ (void)regwatch(phandle, MIGRATE_FLAG, "0"); (void)regwatch(phandle, RELEASE_BOND, "0"); (void)regwatch(phandle, REBOND_SRIOV, "0"); (void)regwatch(phandle, EXINFO_FLAG_PATH, "exinfo_token"); (void)regwatch(phandle, DISABLE_EXINFO_PATH, "exinfo_token"); /* write cpu hotplug feature if cpu support hotplug */ iIsHotplug = SetCpuHotplugFeature(phandle); if (iIsHotplug == XEN_SUCC) { /* if OS support cpu hotplug register a watch */ cpuhotplug_regwatch(phandle); } (void)regwatch(phandle, STORAGE_SNAPSHOT_FLAG, "0"); (void)regwatch(phandle, XS_HEART_BEAT_RATE, "0"); (void)regwatch(phandle, UVP_UNPLUG_DISK , "uvptoken"); (void)regwatch(phandle, OS_CMD_XS_PATH, "0"); } /***************************************************************************** Function : uvp_unregwatch Description:ע watch Input :phandle xenstore Input : Return : None *****************************************************************************/ void uvp_unregwatch(void *phandle) { int iIsHotplug = XEN_FAIL; if(NULL == phandle) { return; } (void)xs_unwatch(phandle, PVDRIVER_STATIC_INFO_PATH , "staticInfoToken"); (void)xs_unwatch(phandle, COMPLETE_RESTORE, "0"); (void)xs_unwatch(phandle, DRIVER_RESUME_FLAG, "uvptoken"); /* ΪǨƺ󴥷һܻȡ */ (void)xs_unwatch(phandle, MIGRATE_FLAG, "0"); (void)xs_unwatch(phandle, RELEASE_BOND, "0"); (void)xs_unwatch(phandle, REBOND_SRIOV, "0"); (void)xs_unwatch(phandle, EXINFO_FLAG_PATH, "exinfo_token"); (void)xs_unwatch(phandle, DISABLE_EXINFO_PATH, "exinfo_token"); /* write cpu hotplug feature if cpu support hotplug */ iIsHotplug = SetCpuHotplugFeature(phandle); if (iIsHotplug == XEN_SUCC) { /* if OS support cpu hotplug register a watch */ (void)xs_unwatch(phandle, CPU_HOTPLUG_SIGNAL , ""); } (void)xs_unwatch(phandle, STORAGE_SNAPSHOT_FLAG, "0"); (void)xs_unwatch(phandle, XS_HEART_BEAT_RATE, "0"); 
(void)xs_unwatch(phandle, UVP_UNPLUG_DISK , "uvptoken"); } /***************************************************************************** Function : write_feature_flag Description: Input : phandle xenstore Input : feature_flag true or false Return : None *****************************************************************************/ static void write_feature_flag(void *phandle, char *feature_flag) { if (NULL == phandle || NULL == feature_flag) { return; } write_to_xenstore(phandle, FEATURE_FLAG_WATCH_PATH, feature_flag); } /***************************************************************************** Function : write_service_flag Description:xenstoreдܼؽǷkillı־λfalseʾѾkill Input :phandle xenstore Input : service_flag true or false Return : None *****************************************************************************/ void write_service_flag(void *phandle, char *service_flag) { if (NULL == phandle || NULL == service_flag) { return; } write_to_xenstore(phandle, SERVICE_FLAG_WATCH_PATH, service_flag); } /***************************************************************************** Function : write_vmstate_flag Description: note vm is running Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void write_vmstate_flag(void *phandle, char *vm_state) { if (NULL == phandle || NULL == vm_state) { return; } write_to_xenstore(phandle, UVP_VM_STATE_PATH, vm_state); } /***************************************************************************** Function : do_unplugdisk Description: Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void *do_unplugdisk(void* handle) { char pszCommand[SHELL_BUFFER] = {0}; char chUnplugPath[SHELL_BUFFER] = {0}; char *pchUnplugDiskName = NULL; char pszBuff[SHELL_BUFFER] = {0}; (void)memset(chUnplugPath, 0, SHELL_BUFFER); (void)snprintf(chUnplugPath, sizeof(chUnplugPath), "%s", UVP_UNPLUG_DISK); pchUnplugDiskName = read_from_xenstore(handle, chUnplugPath); if (NULL != pchUnplugDiskName && strstr(pchUnplugDiskName, "xvd") != NULL) { (void)memset(pszCommand, 0, SHELL_BUFFER); (void)memset(pszBuff, 0, SHELL_BUFFER); (void)snprintf(pszCommand, SHELL_BUFFER, "pvumount.sh %s", pchUnplugDiskName); (void)system(pszCommand); /*if (0 != iRet) { ERR_LOG("unplug disk: call uvpPopen pszCommand=%s Fail ret = %d \n", pszCommand, iRet); }*/ } if (NULL != pchUnplugDiskName) { free(pchUnplugDiskName); pchUnplugDiskName = NULL; } return pchUnplugDiskName; } /***************************************************************************** Function : SetScsiFeature Description: note this pv support scsi function Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void SetScsiFeature(void *phandle) { if (NULL == phandle) { return; } write_to_xenstore(phandle, SCSI_FEATURE_PATH, "1"); } /***************************************************************************** Function : write_pvops_flag Description: note vm is pvops Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void write_pvops_flag(void *phandle, char *vm_state) { if (NULL == phandle || NULL == vm_state) { return; } write_to_xenstore(phandle, KERNEL_PV_OPS, vm_state); } /***************************************************************************** Function : 
update_netinfo_flag Description: if libvirt has netinfo,then g_netinfo_value = 1 Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void set_netinfo_flag(void *handle) { char *netinfoFlag = NULL; if (NULL == handle) { return; } g_netinfo_value = 0; netinfoFlag = read_from_xenstore(handle, NETINFO_FEATURE_PATH); if(NULL != netinfoFlag) { if(strcmp(netinfoFlag, "1") == 0) { g_netinfo_value = 1; } free(netinfoFlag); netinfoFlag = NULL; } } /***************************************************************************** Function : do_wtach_functions Description: DomU's extended-information watch function Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void do_watch_functions(void *handle) { if(!g_disable_exinfo_value) { if(1 == g_netinfo_value) NetinfoNetworkctlmon( handle ); else networkctlmon( handle ); (void)memoryworkctlmon( handle ); (void)diskworkctlmon( handle ); (void)hostnameworkctlmon( handle ); if (ERROR == cpuworkctlmon( handle )) { write_to_xenstore(handle, CPU_DATA_PATH, "error"); } } return; } void do_watch_functions_delay(void *handle) { static unsigned int i = 0; (void)sleep(5); if( !g_disable_exinfo_value ) { if(1 == g_netinfo_value) NetinfoNetworkctlmon(handle); else networkctlmon( handle ); (void)memoryworkctlmon( handle ); i++; if ( 6 == i) { (void)diskworkctlmon( handle ); (void)hostnameworkctlmon( handle ); if (ERROR == cpuworkctlmon( handle )) { write_to_xenstore(handle, CPU_DATA_PATH, "error"); } i = 0; } } return; } /***************************************************************************** Function : watch_listen Description: 豸select¼ Input :xsfd : ļ Output : None Return : SUCC : 5ڶȡtrue *****************************************************************************/ int watch_listen(int xsfd) { fd_set set; struct timeval tv = {.tv_sec = 5, .tv_usec = 0}; /*lint -e530 */ FD_ZERO(&set); FD_SET((unsigned int)xsfd, (fd_set *)&set); /*lint +e530 */ /* selectֵҪ1 */ if ((select((unsigned int)(xsfd + 1), &set, NULL, NULL, &tv) > 0 ) && FD_ISSET((unsigned int)xsfd, &set)) { return SUCC; } return ERROR; } /***************************************************************************** Function : condition Description: whileѭж Input :None Output : None Return : SUCC : һֱwatchĴ *****************************************************************************/ int condition(void) { return SUCC; } /***************************************************************************** Function : IsSupportStorageSnapshotcheck Description: жϲϵͳǷ֧һԣֵֽ֧iomirror_snapshot_flagΪ"0" Input : handle -- xenstore file handle Output : None Return : None *****************************************************************************/ void IsSupportStorageSnapshotcheck (void *handle) { struct utsname buf; (void)memset_s (&buf, sizeof(struct utsname), 0, sizeof(struct utsname)); if (uname(&buf) < 0) { ERR_LOG("Get os release failed, errno=%d.", errno); return; } /* жǷϵͳǷΪNovell SUSE Linux Enterprise Server 11 SP1 Novell SUSE Linux Enterprise Server 11 SP2 */ if (strstr(buf.release, SUSE_11_SP1) || strstr(buf.release, SUSE_11_SP2)) { write_to_xenstore(handle, IOMIRROR_SNAPSHOT_FLAG, "0"); } return; } /***************************************************************************** Function : timing_monitor Description: watch domU's extended-information per 5 seconds Input : handle -- xenstore file handle Output : None Return : 
None *****************************************************************************/ void *timing_monitor(void *handle) { char *xenversionflag = NULL; char UpgradeOldVerInfo[VER_SIZE]= {0}; FILE *UpgradeOldVerFile = NULL; (void)sleep(5); InitBond(); (void)netbond(); //һдxentoreдɹ xb_write_first_flag = 0; do_watch_functions(handle); /* ֧һԿдvss ־λ */ IsSupportStorageSnapshotcheck(handle); /* ñ־λʾpv driver汾֧pvscsi */ SetScsiFeature(handle); /* дpvdriver־ */ write_service_flag(handle, "true"); /* ״̬־λ*/ write_vmstate_flag(handle, "running"); /* д PV OPS ں˱־λ */ if ( ! access("/proc/xen/version", R_OK) || ! access("/proc/xen_version", R_OK) || ! access("/dev/xen/xenbus", R_OK)) { write_pvops_flag(handle, "1"); } while(SUCC == condition()) { do_watch_functions_delay(handle); } return NULL; } /***************************************************************************** Function : free_fs_mount_list Description: ͷ Input : mounts Output : None Return : None *****************************************************************************/ static void free_fs_mount_list(FsMountList *mounts) { FsMount *mount = NULL, *temp = NULL; if (!mounts) { return; } /* */ /*lint -e506 */ QTAILQ_FOREACH_SAFE(mount, mounts, next, temp) { /* ɾ */ QTAILQ_REMOVE(mounts, mount, next); if (mount) { freePath(mount->dirname, mount->devtype, NULL); free(mount); } } /*lint +e506 */ } /***************************************************************************** Function : build_fs_mount_list Description: mount Input : mounts : Output : None Return : -1 mountʧ Return : 0 mountɹ, *****************************************************************************/ static int build_fs_mount_list(FsMountList *mounts) { struct mntent *ment = NULL; FsMount *mount = NULL; const char *mtab = "/proc/self/mounts"; FILE *fp = NULL; char *devstate = NULL; char *psaveptr1 = NULL; int i = 0; fp = setmntent(mtab, "r"); if (!fp) { ERR_LOG("Unable to read mtab, errno=%d.", errno); return ERROR; } while ((ment = getmntent(fp))) { devstate = strtok_r(ment->mnt_opts, ",", &psaveptr1); /* ֻļϵͳ˵ */ if ((NULL != devstate) && (0 == strcmp(devstate, "ro"))) { continue; } /* ļϵͳͲ ext3ext4reiserfsjfsxfs֮һĹ˵ */ for ( i = 0; i < DEV_TYPE_NUM; i++) { if (!strcmp(ment->mnt_type, devtype[i])) { mount = (FsMount *)malloc(sizeof(FsMount)); if (NULL == mount) { ERR_LOG("Malloc failed."); (void)endmntent(fp); return ERROR; } (void)memset_s(mount, sizeof(FsMount), 0, sizeof(FsMount)); mount->dirname = strdup(ment->mnt_dir); mount->devtype = strdup(ment->mnt_type); QTAILQ_INSERT_TAIL(mounts, mount, next); break; } } } (void)endmntent(fp); return SUCC; } /***************************************************************************** Function : execute_fsfreeze_shell Description: ִûű Input : state ʾ״̬ⶳ Output : None Return : 0 ִгɹ Return : -1 ִʧ *****************************************************************************/ static int execute_fsfreeze_shell(const char *state) { char buf[SHELL_BUFFER] = {0}; char cmd[SHELL_BUFFER] = {0}; int ret = 0; INFO_LOG("Enter execute_fsfreeze_shell."); /* жûűǷ */ if (access(SHELL_PATH, F_OK) != 0) { INFO_LOG("No usr shell, errno=%d.", errno); return SUCC; } (void)snprintf_s(cmd, sizeof(cmd), sizeof(cmd), "sh %s %s >/dev/null 2>&1", SHELL_PATH, state); /* ִûű */ ret = uvpPopen(cmd, buf, SHELL_BUFFER); if (0 != ret) { (void)ERR_LOG("Execute fsfreeze shell failed, cmd=%s, output=%s ret = %d.", cmd, buf, ret); return ERROR; } INFO_LOG("Leave execute_fsfreeze_shell success."); return SUCC; } 
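/*
 * Editor's usage sketch (not part of the original source): the minimal
 * FIFREEZE/FITHAW protocol that guest_cache_freeze()/guest_cache_thaw()
 * below are built on. It assumes <fcntl.h>, <unistd.h> and <sys/ioctl.h>
 * are available, as in the includes at the top of this file.
 */
static int freeze_one_mount_sketch(const char *dirname)
{
    int ret;
    int fd = open(dirname, O_RDONLY);   /* any fd on the mounted filesystem works */
    if (fd == -1)
    {
        return -1;
    }
    ret = ioctl(fd, FIFREEZE);          /* block new writes and flush dirty data */
    if (ret == 0)
    {
        ret = ioctl(fd, FITHAW);        /* resume I/O on the filesystem */
    }
    close(fd);
    return ret;
}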
/***************************************************************************** Function : guest_cache_thaw Description: ⶳļϵͳݿ Input : mounts mountݽṹ Output : None Return : 0 ⶳɹ Return : -1 ⶳʧ *****************************************************************************/ static int guest_cache_thaw(FsMountList *mounts) { FsMount *mount = NULL; int fd = 0, i = 0, ret = 0; QTAILQ_FOREACH(mount, mounts, next) { fd = open(mount->dirname, O_RDONLY); if (fd == -1) { ERR_LOG("Open file error, errno=%d", errno); continue; } /* ⶳɹೢ10Σ10Բɹ򷵻ʧ */ do { ret = ioctl(fd, FITHAW); i++; } while (0 != ret && i < 10); close(fd); if (10 == i && 0 != ret) { ERR_LOG("Ioctl [FITHAW] error, mount name is %s, errno=%d", mount->dirname, errno); goto out; } } out: if (execute_fsfreeze_shell(THAW) < 0) { ERR_LOG("guest_fsfreeze_shell thaw failed"); return ERROR; } if (0 != ret) { return ERROR; } return SUCC; } /***************************************************************************** Function : guest_cache_freeze Description: ˢݿ⼰ļϵͳ棬IO Input : mounts mountݽṹ Output : None Return : 0 ɹ Return : -1 ʧ *****************************************************************************/ static int guest_cache_freeze(FsMountList *mounts) { struct FsMount *mount = NULL; int fd = 0; /* ִûű(ˢݿ⻺棬ݿIO)*/ if (execute_fsfreeze_shell(FREEZE) < 0) { ERR_LOG("execute_fsfreeze_shell freeze failed."); } /* mount,ķŽ */ if (build_fs_mount_list(mounts) < 0) { ERR_LOG("build_fs_mount_list failed."); goto error; } /* ȡ足ļϵͳ */ QTAILQ_FOREACH(mount, mounts, next) { fd = open(mount->dirname, O_RDONLY); if (-1 == fd) { ERR_LOG("Open file error, dirname=%s, errno=%d.", mount->dirname, errno); goto error; } /* ļϵͳ */ if (ioctl(fd, FIFREEZE) < 0) { close(fd); ERR_LOG("Ioctl [FIFREEZE] error, errno=%d.", errno); goto error; } close(fd); } return SUCC; error: /* ʧⶳļϵͳݿ */ (void)guest_cache_thaw(mounts); return ERROR; } /***************************************************************************** Function : do_cache_thaw Description: ⶳļϵͳݿⲢݷֵдxenstoreֵ Input : handle xenstore Input : mounts mountݽṹ Output : None Return : 0 ִгɹ Return : -1 ִʧ *****************************************************************************/ int do_cache_thaw(void *handle, FsMountList *mounts) { int ret = 0; (void)pthread_mutex_lock(&mutex); if(gfreezeflag == 1) { gfreezeflag = 0; if (guest_cache_thaw(mounts) < 0) { ret = -1; ERR_LOG("guest_cache_thaw failed!"); write_to_xenstore(handle, IOMIRROR_SNAPSHOT_FLAG, "-1"); } else { INFO_LOG("Guest cache thaw success!"); write_to_xenstore(handle, IOMIRROR_SNAPSHOT_FLAG, "0"); write_to_xenstore(handle, STORAGE_SNAPSHOT_FLAG, "0"); } /* ⶳɹʧܶҪݽṹͷŵ */ free_fs_mount_list(mounts); } (void)pthread_mutex_unlock(&mutex); return ret; } /***************************************************************************** Function : do_time_thaw Description: ȴ10sxenstoreдֵⶳļϵͳ Input : arg Output : None Return : None *****************************************************************************/ void *do_time_thaw(void *arg) { void *handle = NULL; FsMountList *mounts = NULL; struct Freezearg *rev_arg; if(NULL == arg) { return NULL; } rev_arg = (struct Freezearg *)arg; mounts = rev_arg->mounts; handle = rev_arg->handle; (void)sleep(10); do_cache_thaw(handle, mounts); if(arg) { free(arg); arg = NULL; } return arg; } /***************************************************************************** Function : wait_for_thaw Description: ̣߳߳Ҫʵֵȴ10sⶳ Input : handle : xenstoreľ Input : mounts : mountݽṹ Output : None Return : 0 ɹ Return : -1 ʧ 
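* 
* Usage sketch (editor's addition, mirroring the STORAGE_SNAPSHOT_FLAG watch
* handler later in this file; handle is the xenstore handle and all other
* names come from this file):
* 
*     FsMountList mounts;
*     QTAILQ_INIT(&mounts);
*     if (guest_cache_freeze(&mounts) < 0) {
*         free_fs_mount_list(&mounts);
*     } else {
*         gfreezeflag = 1;                       /* mark the guest as frozen */
*         (void)wait_for_thaw(handle, &mounts);  /* auto-thaw after 10 seconds */
*     }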
*****************************************************************************/ int wait_for_thaw(void *handle, FsMountList *mounts) { int thread = 0; pthread_t thread_id = 0; int ret = 0; struct Freezearg *arg; pthread_attr_t attr; /* argҪdo_time_thawͷ */ arg = (struct Freezearg *)malloc(sizeof(struct Freezearg)); if(arg == NULL) { ERR_LOG("Malloc failed."); return -1; } pthread_attr_init (&attr); pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); arg->handle = handle; arg->mounts = mounts; thread = pthread_create(&thread_id, &attr, do_time_thaw, (void *)arg); if (strcmp(strerror(thread), "Success") != 0) { ret = -1; DEBUG_LOG("Create do_time_thaw fail, thread=%d.", thread); free(arg); } pthread_attr_destroy (&attr); return ret; } /***************************************************************************** Function : write_heartbeat Description: д Input :handle : xenstoreľ Output : None Return : None *****************************************************************************/ void *write_heartbeat(void *handle) { (void)pthread_mutex_lock(&heartbeat_mutex); //ñ־λʾд߳ heartbeat_thread_exist_flag = 1; char heartbeat[HEART_BEAT_BUF_LEN] = {0}; while( 0 < heartbeatrate) { if (9999 > heartbeatnum) { heartbeatnum = heartbeatnum + 1; } else { heartbeatnum = 1; } (void)memset_s(heartbeat, HEART_BEAT_BUF_LEN, 0, HEART_BEAT_BUF_LEN); (void)snprintf_s(heartbeat, HEART_BEAT_BUF_LEN, HEART_BEAT_BUF_LEN, "%d", heartbeatnum); if(xb_write_first_flag == 0) { write_to_xenstore(handle, XS_HEART_BEAT, heartbeat); } else { write_weak_to_xenstore(handle, XS_HEART_BEAT, heartbeat); } (void)sleep(heartbeatrate); } heartbeat_thread_exist_flag = 0; (void)pthread_mutex_unlock(&heartbeat_mutex); return NULL; } /***************************************************************************** Function : create_write_heartbeat_thread Description: ̣߳߳д Input :handle : xenstoreľ Output : None Return : 0 ɹ Return : -1 ʧ *****************************************************************************/ int create_write_heartbeat_thread(void *handle) { char logbuf[LOG_BUF_LEN] = {0}; int thread = 0; pthread_t thread_id = 0; int ret = 0; pthread_attr_t attr; (void)pthread_attr_init (&attr); (void)pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); thread = pthread_create(&thread_id, &attr, write_heartbeat, handle); if (strcmp(strerror(thread), "Success") != 0) { ret = -1; (void)memset_s(logbuf, LOG_BUF_LEN, 0, LOG_BUF_LEN); (void)snprintf_s(logbuf, LOG_BUF_LEN, LOG_BUF_LEN, "Create write_heartbeat, errorno = %d !", thread); INFO_LOG("logbuf=%s", logbuf); } (void)pthread_attr_destroy (&attr); return ret; } void do_heartbeat_watch(void *handle) { char *heartbeat_rate = NULL; char pszCommand_mig[100] = {0}; (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", XS_HEART_BEAT_RATE); heartbeat_rate = read_from_xenstore(handle, pszCommand_mig); if (NULL != heartbeat_rate) { unsigned int rate = (unsigned int)atoi(heartbeat_rate); free(heartbeat_rate); heartbeat_rate = NULL; if (0 == rate) { /* ֵΪֹͣд*/ heartbeatrate = 0; while(heartbeat_thread_exist_flag) { (void)sleep(1); } heartbeatnum = 0; if(xb_write_first_flag == 0) { write_to_xenstore(handle, XS_HEART_BEAT, "0"); } else { write_weak_to_xenstore(handle, XS_HEART_BEAT, "0"); } } else if (0 == heartbeatrate) { heartbeatrate = rate; /* ֵΪʱд߳*/ (void)create_write_heartbeat_thread(handle); } else { /* ޸дƵ*/ heartbeatrate = rate; } } } void do_complete_restore_watch(void *handle) { int ret = 0; char pszCommand[SHELL_BUFFER] = {0}; char 
*migratestate = NULL; char hib_migrate[SHELL_BUFFER] = {0}; char *hib_migrate_buffer = "cat /etc/.uvp-monitor/hibernate_migrate_flag.ini"; /*set ipv6 info value*/ set_netinfo_flag(handle); /* ֧һԿд־λ */ IsSupportStorageSnapshotcheck(handle); (void)memset_s(pszCommand, SHELL_BUFFER, 0, SHELL_BUFFER); (void)snprintf_s(pszCommand, sizeof(pszCommand), sizeof(pszCommand), "%s", COMPLETE_RESTORE); migratestate = read_from_xenstore(handle, pszCommand); if((NULL != migratestate) && \ ((0 == strcmp(migratestate, "1")) || (0 == strcmp(migratestate, "2")))) { if ( ! access("/proc/xen/version", R_OK) || ! access("/proc/xen_version", R_OK) || ! access("/dev/xen/xenbus", R_OK)) { SetScsiFeature(handle); write_pvops_flag(handle, "1"); write_service_flag(handle, "true"); write_vmstate_flag(handle, "running"); write_feature_flag(handle, "1"); INFO_LOG("modify_swappiness_after_blkfront.sh restore in PVOPS GuestOS"); (void)system("sh /etc/.uvp-monitor/modify_swappiness_after_blkfront.sh restore 2>/dev/null"); } write_vrm_flag(handle); INFO_LOG("Complate restore, send ndp"); (void)system("sh /etc/init.d/xenvnet-arp 2>/dev/null"); write_to_file(); (void)write_to_xenstore(handle, CMD_RESULT_XS_PATH, chret); write_to_xenstore(handle, XS_NOT_USE_PV_UPGRADE, "true"); INFO_LOG("Do not provide UVP Tools upgrade ability."); if(strlen(feature_str) > 0) { write_to_xenstore(handle, GUSET_OS_FEATURE, feature_str); } if(0 == access("/etc/.uvp-monitor/hibernate_migrate_flag.ini", R_OK)) { ret = uvpPopen(hib_migrate_buffer, hib_migrate, SHELL_BUFFER); if((0 == ret) && (! strcmp(trim(hib_migrate), "1"))) { hibernate_migrate_flag = 1; (void)write_to_xenstore(handle, COMPLETE_RESTORE, "0"); } else { INFO_LOG("Failed to call uvpPopen hibernate_migrate_flag, ret=%d.", ret); } } if(!hibernate_migrate_flag) { (void)system("hwclock --hctosys 2>/dev/null"); } hibernate_migrate_flag = 0; (void)deal_hib_migrate_flag_file(hibernate_migrate_flag); /*if Linux OS is VSA, exec this shell after migrate*/ if(( ! access(PYTHON_PATH, R_OK)) && (0 == strcmp(migratestate, "2"))) { (void)system(EXEC_PYTHON_PATH); } } if((NULL != migratestate) && ((0 == strcmp(migratestate, "3")) )) { if ( ! access("/proc/xen/version", R_OK) || ! access("/proc/xen_version", R_OK) || ! 
access("/dev/xen/xenbus", R_OK) ) { write_service_flag(handle, "true"); } } if(NULL != migratestate) { free(migratestate); } } void do_cpu_hotplug_watch(void *handle) { char pszCommand[SHELL_BUFFER] = {0}; char *cpustate = NULL; char *cpuhotplugstate = NULL; (void)memset_s(pszCommand, SHELL_BUFFER, 0, SHELL_BUFFER); (void)snprintf_s(pszCommand, sizeof(pszCommand), sizeof(pszCommand), "%s", CPU_HOTPLUG_STATE); /*ǷȲı־*/ cpustate = read_from_xenstore(handle, pszCommand); if((NULL != cpustate) && (0 == strcmp(cpustate, "1"))) { (void)memset_s(pszCommand, SHELL_BUFFER, 0, SHELL_BUFFER); (void)snprintf_s(pszCommand, sizeof(pszCommand), sizeof(pszCommand), "%s", CPU_HOTPLUG_ONLINE); /*Զonline״̬жϣֵ״̬Ϊ1ʱcpuԶonlineܣ״̬ΪԶonline*/ cpuhotplugstate = read_from_xenstore(handle, pszCommand); if((NULL != cpuhotplugstate) && (0 == strcmp(cpuhotplugstate, "1"))) { (void)DoCpuHotplug(handle); } else { (void)write_to_xenstore(handle, CPU_HOTPLUG_STATE, "0"); (void)write_to_xenstore(handle, CPU_HOTPLUG_SIGNAL, "0"); } if( NULL != cpuhotplugstate) { free(cpuhotplugstate); } } if( NULL != cpustate ) { free(cpustate); } } void do_exinfo_flag_watch(void *handle) { char pszCommand_mig[100] = {0}; char *exinfo_flag = NULL; (void)memset_s(pszCommand_mig, 100, 0, 100); (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", EXINFO_FLAG_PATH); exinfo_flag = read_from_xenstore(handle, pszCommand_mig); if ((NULL != exinfo_flag) && (exinfo_flag[0] >= '0' && exinfo_flag[0] <= '9')) { g_exinfo_flag_value = strtol(exinfo_flag, NULL, 10); } if(NULL != exinfo_flag) { free(exinfo_flag); exinfo_flag = NULL; } } void do_driver_resume_watch(void *handle) { char pszCommand[SHELL_BUFFER] = {0}; char *driver_resume = NULL; (void)memset_s(pszCommand, SHELL_BUFFER, 0, SHELL_BUFFER); (void)snprintf_s(pszCommand, sizeof(pszCommand), sizeof(pszCommand), "%s", DRIVER_RESUME_FLAG); driver_resume = read_from_xenstore(handle, pszCommand); if((NULL != driver_resume) && (0 == strcmp(driver_resume, "1"))) { INFO_LOG("Driver resume, send ndp."); (void)system("sh /etc/init.d/xenvnet-arp 2>/dev/null"); } if(NULL != driver_resume) { free(driver_resume); } } static int uvpexecl(const char* pszCmd) { int nRet = 0; int stat = 0; pid_t cpid = -1; /*μ*/ if((pszCmd == NULL) || (0 == strlen(pszCmd))) { return ERROR_PARAMETER; } cpid = fork(); if (0 > cpid) { ERR_LOG("Failed to create subprocess, errno=%d.", errno); return ERROR_FORK; } else if (0 == cpid) { /* ӽ */ (void) execl("/bin/sh", "sh", "-c", pszCmd, NULL); _exit(127); } else { /* */ while (0 > waitpid(cpid, &stat, 0)) { if (EINTR != errno) { return ERROR_WAITPID; } } } /* õĽű ֵ0-127 */ if (WIFEXITED(stat)) { nRet = WEXITSTATUS(stat); } return nRet; } void *do_guestcmd_watch(void *handle) { char *pchCmdType=NULL; char *pchFileName=NULL; char *pchPara=NULL; char *guest_cmd = NULL; char chCmdStr[BUFFER_SIZE]={0}; char pszCommand[SHELL_BUFFER] = {0}; int iRet = 0; guest_cmd = read_from_xenstore(handle, OS_CMD_XS_PATH); /* ֵΪʱִ */ if( NULL == guest_cmd ) { ERR_LOG("read_from_xenstore error, guest_cmd is NULL."); return NULL; } if( !strcmp(guest_cmd, " ")) { free(guest_cmd); guest_cmd = NULL; return NULL; } /* ɾϢ*/ write_to_xenstore(handle, OS_CMD_XS_PATH, " "); (void)strncpy_s(chCmdStr, BUFFER_SIZE, guest_cmd, BUFFER_SIZE); iRet = CheckArg(chCmdStr, &pchCmdType, &pchFileName, &pchPara); (void)memset_s(chret, CMD_RESULT_BUF_LEN, 0, CMD_RESULT_BUF_LEN); (void)snprintf_s(chret, CMD_RESULT_BUF_LEN, CMD_RESULT_BUF_LEN, "%d", iRet); write_to_xenstore(handle, CMD_RESULT_XS_PATH, 
chret); if(ARG_CHECK_OK == iRet) { (void)snprintf_s(pszCommand, BUFFER_SIZE, BUFFER_SIZE, "%s %s/%s %s", pchCmdType, GUEST_CMD_FILE_PATH, pchFileName, pchPara); ERR_LOG("chCmdStr is %s, pszCommand is %s.", chCmdStr, pszCommand); iRet = uvpexecl(pszCommand); if (0 != iRet) { ERR_LOG("Call uvpexecl pszCommand failed ret = %d.", iRet); if (1 == iRet) { iRet = UNEXPECTED_ERROR; ERR_LOG("Change error code 1 to 10."); } } (void)memset_s(chret, CMD_RESULT_BUF_LEN, 0, CMD_RESULT_BUF_LEN); (void)snprintf_s(chret, BUFFER_SIZE, BUFFER_SIZE, "%d", iRet); write_to_xenstore(handle, CMD_RESULT_XS_PATH, chret); } if(NULL != guest_cmd) { free(guest_cmd); guest_cmd = NULL; } return NULL; } int CheckArg(char* chCmdStr, char** pchCmdType, char** pchFileName, char** pchPara) { char *pCmd = NULL; char *outer_ptr = NULL; char *buf = chCmdStr; char *pchCheck = NULL; char pszCommand[BUFFER_SIZE] = {0}; char pszBuff[BUFFER_SIZE] = {0}; char FullFileName[BUFFER_SIZE] = {0}; int iRet = 0; int num = 1; while((pCmd = strtok_s(buf, ">", &outer_ptr)) != NULL) { if(num == 1) *pchCmdType = pCmd + 1; else if(num == 2) *pchFileName = pCmd + 1; else if(num == 3) *pchPara = pCmd + 1; else pchCheck = pCmd + 1; num++; buf = NULL; } if (NULL == *pchCmdType || NULL == *pchFileName || NULL == *pchPara || NULL == pchCheck) { ERR_LOG("pchCmdType=%p, pchFileName=%p, pchPara=%p, pchCheck=%p.", \ *pchCmdType, *pchFileName, *pchPara, pchCheck); return ARG_ERROR; } if(strstr(*pchFileName, "<") || strstr(*pchFileName, ">") || strstr(*pchFileName, "\\") || strstr(*pchFileName, "/") || strstr(*pchFileName, "..") || strstr(*pchFileName, "&") || strstr(*pchFileName, "|") || strstr(*pchFileName, ";") || strstr(*pchFileName, "$") || strstr(*pchFileName, " ") || strstr(*pchFileName, "`") || strstr(*pchFileName, "\n")) { ERR_LOG("Check the file name error, pchFileName=%s.", *pchFileName); return ARG_ERROR; } if(strstr(*pchPara, "<") ||strstr(*pchPara, ">") || strstr(*pchPara, "\\") || strstr(*pchPara, "/") || strstr(*pchPara, "&") || strstr(*pchPara, "|") || strstr(*pchPara, ";") || strstr(*pchPara, "$") || strstr(*pchPara, "?") || strstr(*pchPara, ",") || strstr(*pchPara, "*") || strstr(*pchPara, "{") || strstr(*pchPara, "}") || strstr(*pchPara, "`") || strstr(*pchPara, "\n")) { ERR_LOG("Check the parameters error, pchPara=%s.", *pchPara); return ARG_ERROR; } if(!strcmp(*pchCmdType, "sh")) { if(!strstr(*pchFileName, ".sh")) { ERR_LOG("The cmd type sh and filename is not match, pchFileName=%s.", *pchFileName); return ARG_ERROR; } } else if (!strcmp(*pchCmdType, "python")) { if(!strstr(*pchFileName, ".py")) { ERR_LOG("The cmd type python and filename is not match, pchFileName=%s.", *pchFileName); return ARG_ERROR; } } else if (!strcmp(*pchCmdType, "perl")) { if(!strstr(*pchFileName, ".pl")) { ERR_LOG("The cmd type perl and filename is not match, pchFileName=%s.", *pchFileName); return ARG_ERROR; } } else { ERR_LOG("The cmd type is Invalid, pchCmdType=%s.", *pchCmdType); return ARG_ERROR; } (void)snprintf_s(FullFileName, BUFFER_SIZE, BUFFER_SIZE, "%s/%s", GUEST_CMD_FILE_PATH, *pchFileName); if(access(FullFileName, F_OK)) { ERR_LOG("The file not exist, FullFileName=%s.", FullFileName); return FILENAME_ERROR; } (void)snprintf_s(pszCommand, BUFFER_SIZE, BUFFER_SIZE, "sha256sum %s", FullFileName); iRet = uvpPopen(pszCommand, pszBuff, BUFFER_SIZE); if (0 != iRet) { ERR_LOG("Call uvpPopen pszCommand failed, pszCommand=%s, pszBuff=%s, ret=%d.", pszCommand, pszBuff, iRet); return CHECKSUM_ERROR; } pCmd = strtok_s(pszBuff, " ", &outer_ptr); if(NULL == pCmd) 
{ ERR_LOG("strtok_s error, pszBuff=%s.", pszBuff); return CHECKSUM_ERROR; } if(strcmp(pchCheck, pCmd)) { ERR_LOG("Integrity check error, pchCheck=%s, pCmd=%s.", pchCheck, pCmd); return CHECKSUM_ERROR; } return ARG_CHECK_OK; } /***************************************************************************** Function : do_watch_proc Description: watch¼ Input :handle : xenstoreľ Output : None Return : None *****************************************************************************/ void do_watch_proc(void *handle) { char **vec; int xsfd = -1; char pszCommand[SHELL_BUFFER] = {0}; char pszCommand_mig[100] = {0}; char *migrate_start = NULL; char *release_bond = NULL; char *rebond = NULL; char *disableInfo_flag = NULL; char *healthstate = NULL; char *storage_snapshot_flag = NULL; FsMountList mounts; QTAILQ_INIT(&mounts); pthread_t th_watch = 0; int iThread = -1; pthread_attr_t attr; xsfd = getxsfileno(handle); if ((int) - 1 == xsfd) { return; } while (SUCC == condition()) { if (SUCC == watch_listen(xsfd)) { vec = readWatch(handle); if (!vec) { continue; } if (strstr(*vec, PVDRIVER_STATIC_INFO_PATH)) { SetPvDriverVer(handle); //ԭ̬ѯ̬Ϣֻڿʱдһ (void)SetCpuHotplugFeature(handle); SetScsiFeature(handle); } else if (NULL != strstr(*vec, CPU_HOTPLUG_SIGNAL)) { do_cpu_hotplug_watch(handle); } else if ( NULL != strstr(*vec, COMPLETE_RESTORE)) { do_complete_restore_watch(handle); } else if ( NULL != strstr(*vec, DRIVER_RESUME_FLAG)) { do_driver_resume_watch(handle); } else if ( NULL != strstr(*vec, MIGRATE_FLAG)) { (void)memset_s(pszCommand_mig, 100, 0, 100); (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", MIGRATE_FLAG); migrate_start = read_from_xenstore(handle, pszCommand_mig); if((NULL != migrate_start) && (0 == strcmp(migrate_start, "1"))) { hibernate_migrate_flag = 1; /* ñmonitorʧЧдļ */ (void)deal_hib_migrate_flag_file(hibernate_migrate_flag); } if(NULL != migrate_start) { free(migrate_start); } } /*before hot-migrate*/ else if ( NULL != strstr(*vec, RELEASE_BOND)) { //Ϊעwacth ʱһΣΪ˷ֹ monitor ʱִжÿִ֮Ҫ // xenstore ļֵֹʱ˶ (void)memset_s(pszCommand_mig, 100, 0, 100); (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", RELEASE_BOND); release_bond = read_from_xenstore(handle, pszCommand_mig); if((NULL != release_bond) && (0 == strcmp(release_bond, "1"))) { (void)releasenetbond(handle); } if(NULL != release_bond) { free(release_bond); } } /*after hot-migrate*/ else if ( NULL != strstr(*vec, REBOND_SRIOV)) { (void)memset_s(pszCommand_mig, 100, 0, 100); (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", REBOND_SRIOV); rebond = read_from_xenstore(handle, pszCommand_mig); if((NULL != rebond) && (0 == strcmp(rebond, "1"))) { (void)rebondnet(handle); } if(NULL != rebond) { free(rebond); } } else if ( NULL != strstr(*vec, EXINFO_FLAG_PATH)) { do_exinfo_flag_watch(handle); } else if (NULL != strstr(*vec, DISABLE_EXINFO_PATH)) { (void)memset_s(pszCommand_mig, 100, 0, 100); (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), "%s", DISABLE_EXINFO_PATH); disableInfo_flag = read_from_xenstore(handle, pszCommand_mig); if ((NULL != disableInfo_flag) && (disableInfo_flag[0] == '0' || disableInfo_flag[0] == '1')) { g_disable_exinfo_value = strtol(disableInfo_flag, NULL, 10); } if (NULL != disableInfo_flag) { free(disableInfo_flag); disableInfo_flag = NULL; } } else if (NULL != strstr(*vec, STORAGE_SNAPSHOT_FLAG)) { (void)snprintf_s(pszCommand_mig, sizeof(pszCommand_mig), sizeof(pszCommand_mig), 
"%s", STORAGE_SNAPSHOT_FLAG); storage_snapshot_flag = read_from_xenstore(handle, pszCommand_mig); /* ֵ״̬Ϊ1ʱˢݿ⼰ļϵͳ沢ݿ⼰ļϵͳ */ if ((NULL != storage_snapshot_flag) && (0 == strcmp(storage_snapshot_flag, "1"))) { write_to_xenstore(handle, IOMIRROR_SNAPSHOT_FLAG, "1"); if (guest_cache_freeze(&mounts) < 0) { ERR_LOG("guest_cache_freeze failed!"); write_to_xenstore(handle, IOMIRROR_SNAPSHOT_FLAG, "-1"); free_fs_mount_list(&mounts); } else { INFO_LOG("guest_cache_freeze success!"); write_to_xenstore(handle, STORAGE_SNAPSHOT_FLAG, "2"); /* gfreezeflag=1 ʾѾ */ gfreezeflag = 1; /* ̵߳ȴ10sԷⶳ */ wait_for_thaw(handle, &mounts); } } /* ֵ״̬Ϊ3ʱⶳļϵͳ */ if ((NULL != storage_snapshot_flag) && (0 == strcmp(storage_snapshot_flag, "3"))) { /* ڶʱȥⶳ */ do_cache_thaw(handle, &mounts); } if ( NULL != storage_snapshot_flag ) { free(storage_snapshot_flag); } } else if (NULL != strstr(*vec, XS_HEART_BEAT_RATE)) { do_heartbeat_watch(handle); } else if (NULL != strstr(*vec, UVP_UNPLUG_DISK)) { pthread_attr_init (&attr); pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); iThread = pthread_create(&th_watch, &attr, do_unplugdisk, (void*)handle); if (strcmp(strerror(iThread), "Success") != 0) { pthread_attr_destroy (&attr); ERR_LOG("Unplug disk thread create failed!"); } pthread_attr_destroy (&attr); } else if ( NULL != strstr(*vec, OS_CMD_XS_PATH)) { (void)pthread_attr_init (&attr); (void)pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); iThread = pthread_create(&th_watch, &attr, do_guestcmd_watch, (void*)handle); if (strcmp(strerror(iThread), "Success") != 0) { (void)pthread_attr_destroy (&attr); ERR_LOG("Create guestcmd thread failed!"); } (void)pthread_attr_destroy (&attr); } free(vec); vec = NULL; } } close(xsfd); return ; } /***************************************************************************** Function : ReleaseEnvironment Description: release system environment Input :None Output : None Return : None *****************************************************************************/ void ReleaseEnvironment(void *handle) { if (NULL != handle) { write_service_flag(handle, "false"); closexenstore(handle); //lint -save -e438 handle = NULL; }//lint -restore } /***************************************************************************** Function : do_deamon_model Description: عģ飬watch¼ Input :None Output : None Return : None *****************************************************************************/ void do_deamon_model(void *handle) { /* עܼصwatch */ uvp_regwatch(handle); /* дpvdriver־ */ //write_service_flag(handle, "true"); /* ȡܼ */ do_watch_proc(handle); /* ͷxenstore */ ReleaseEnvironment(handle); } /***************************************************************************** Function : do_monitoring Description:ܼ Input :handle xenstore Return : None *****************************************************************************/ void *do_monitoring(void *handle) { if (NULL == handle) { return NULL; } /* ģ鴦 */ do_deamon_model(handle); return NULL; } /***************************************************************************** Function : exe_command Description: ʹsystemִ Input :ݻ· Output : None Return : int *****************************************************************************/ #define UPGRADE_SUCCESS 0 #define MONITOR_UPGRADE_OK 3 #define UPGRADE_ROLLBACK 5 int exe_command(char *path) { char logBuf[BUFFER_SIZE] = {0}; int flag,exit_value; exit_value = system(path); flag = WEXITSTATUS(exit_value); //flag = 1,3,5ʱҪʾ if ( (UPGRADE_SUCCESS != flag) && (MONITOR_UPGRADE_OK != flag) && 
(UPGRADE_ROLLBACK != flag) ) { (void)snprintf_s(logBuf, BUFFER_SIZE - 1, BUFFER_SIZE - 1, "exe error-command:%s", path); INFO_LOG("[Monitor-Upgrade]: logBuf %s",logBuf); return 1; } if(UPGRADE_ROLLBACK == flag) { return 5; } if(MONITOR_UPGRADE_OK == flag) { return 3; } return 0; } /***************************************************************************** Function : init_daemon Description:ӽ̺͸̵Ľڼؽ̱killʱxenstoreд־λ Return : None *****************************************************************************/ void init_daemon() { int iRet; int iStatus, iThread, tThread, mThread, sThread; pid_t cpid, wpid; pid_t pipes[2]; pthread_t pthread_id; pthread_t tthread_id; pthread_t mthread_id; pthread_attr_t attr; char *timeFlag = NULL; pthread_t sthread_id; char buf; void *handle; g_disable_exinfo_value = 0; g_exinfo_flag_value = 0; g_netinfo_value = 0; g_monitor_restart_value = 0; again: /*ܵ*/ iRet = pipe(pipes); if(iRet < 0){ ERR_LOG("Pipe failed, errno=%d, just exit.", errno); exit(1); } cpid = fork(); DEBUG_LOG("Mlock uvp-monitor memory."); iRet = mlockall(MCL_CURRENT|MCL_FUTURE); if(iRet != 0){ ERR_LOG("Mlock uvp-monitor memory failed, errno=%d.", errno); } /* forkʧܣ˳ */ if(cpid < 0) { DEBUG_LOG("Process fork cpid fail, cpid=%d.", cpid); exit(1); } /*ӽ̵*/ else if( 0 == cpid ) { /*write monitor-service-flag "false" after stop uvp-monitor service*/ struct sigaction sig; memset_s(&sig, sizeof(sig), 0, sizeof(sig)); sig.sa_handler= SIG_IGN; sig.sa_flags = SA_RESTART; sigaction(SIGTERM, &sig, NULL); /*end */ handle = openxenstore(); if (NULL == handle) { DEBUG_LOG("Open xenstore fail!"); return; } /*set ipv6 info value*/ set_netinfo_flag(handle); set_guest_feature(handle); /* is vrm */ write_vrm_flag(handle); pthread_attr_init(&attr); pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED); write_to_xenstore(handle, XS_NOT_USE_PV_UPGRADE, "true"); INFO_LOG("Do not provide UVP Tools upgrade ability."); //do_monitoring߳ iThread = pthread_create(&pthread_id, &attr, do_monitoring, (void *)handle); if (strcmp(strerror(iThread), "Success") != 0) { DEBUG_LOG("Create do_monitoring fail, iThread=%d.", iThread); pthread_attr_destroy (&attr); exit(1); } //¿߳ڶʱ5xenstoreдϵͳչϢ mThread = pthread_create(&mthread_id, &attr, timing_monitor, (void *)handle); if (strcmp(strerror(mThread), "Success") != 0) { DEBUG_LOG("Create timing_monitor fail, try again. mThread=%d.", mThread); mThread = pthread_create(&mthread_id, &attr, timing_monitor, (void *)handle); if (strcmp(strerror(mThread), "Success") != 0) { DEBUG_LOG("Create timing_monitor fail, process exit. 
mThread=%d.", mThread); pthread_attr_destroy (&attr); exit(1); } } pthread_attr_destroy (&attr); /*رչܵд*/ close(pipes[1]); iRet = read(pipes[0], &buf, 1); if(0 == iRet) { ERR_LOG("The parent has dead, the uvp-monitor exits."); ReleaseEnvironment(handle); (void)kill(getpid(), SIGKILL); } if(g_monitor_restart_value == 1) { ERR_LOG("Upgrade pv free son process handle."); ReleaseEnvironment(handle); } } /*̵*/ else { handle = openxenstore(); if (NULL == handle) { ERR_LOG("Open xenstore fd failed, just exit."); exit(1); } /*cpidΪӽPID*/ wait_again: wpid = waitpid(cpid, &iStatus, 0); //FIXME if (-1 == wpid) { if(errno == EINTR) //FIXME { INFO_LOG("waitpid when caught a signal."); goto wait_again; } ERR_LOG("waitpid failed, errno=%d.", errno); ReleaseEnvironment(handle); exit(1); } closexenstore(handle); handle = NULL; close(pipes[1]); sleep(1); if (WIFEXITED(iStatus)) ERR_LOG("The uvp-monitor %d exits with status %d.", wpid, WEXITSTATUS(iStatus)); else { ERR_LOG("The uvp-monitor %d exits with unknown reason, iStatus is %d.", wpid, iStatus); } goto again; } } /***************************************************************************** Function : start_service Description: start the service daemon 1ʼ߳ 2ʼл 3عܴ Input :None Output : None Return : None *****************************************************************************/ void start_service(void) { /* ʼ߳ */ init_daemon(); } UVP-Tools-2.2.0.316/uvp-monitor/xenstore_common.c000066400000000000000000000201071314037446600214650ustar00rootroot00000000000000/* * Accesses xenstore * writes these to xenstore. * * Copyright 2016, Huawei Tech. Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/
#include <xs.h>            /* the target of this include was lost in the dump (angle brackets stripped); the libxenstore header is assumed here */
#include "xenstore_common.h"
#include "public_common.h"

/*****************************************************************************
 Function    : trim
 Description : strip trailing whitespace characters from a string
 Input       : char *
 Output      : None
 Return      : char *
*****************************************************************************/
char *trim(char *str)
{
    char *p = NULL;

    if(NULL == str)
    {
        return NULL;
    }

    p = str + strlen(str) - 1;
    while(' ' == *p || '\t' == *p || '\n' == *p || '\r' == *p)
    {
        *p = '\0';
        p--;
    }

    return str;
}

static int get_fd_from_handle(struct xs_handle * handle)
{
    if(handle == NULL)
        return -2;
    else
        return *(int *)handle;
}

/*****************************************************************************
 Function    : write_to_xenstore
 Description : write into xenstore
 Input       : handle -- xenstore handle
               path   -- the xenstore path
               buf    -- the value to write into xenstore
 Output      : None
 Return      : None
*****************************************************************************/
void write_to_xenstore (void *handle, char *path, char *buf)
{
    bool err;
    struct xs_handle *head;
    int fd_pre = -1, fd_aft = -1;

    if(NULL == handle || NULL == path || NULL == buf || 0 == strlen(buf))
    {
        return ;
    }

    head = (struct xs_handle *)handle;
    fd_pre = get_fd_from_handle(head);
    err = xs_write(head, XBT_NULL, path, &buf[0], strlen(buf));
    fd_aft = get_fd_from_handle(head);
    if (!err)
    {
        ERR_LOG("Write %s %s failed, errno is %d, fd_pre is %d, fd_aft is %d.",
            path, buf, errno, fd_pre, fd_aft);
    }
    else
        return;
}

/*****************************************************************************
 Function    : write_weak_to_xenstore
 Description : write into xenstore
 Input       : handle -- xenstore handle
               path   -- the xenstore path
               buf    -- the value to write into xenstore
 Output      : None
 Return      : None
*****************************************************************************/
void write_weak_to_xenstore (void *handle, char *path, char *buf)
{
    bool ret = 0;
    struct xs_handle *head;
    int retry_times = 0;
    int fd_pre = -1, fd_aft = -1;

    if(NULL == handle || NULL == path || NULL == buf || 0 == strlen(buf))
    {
        return ;
    }

    head = (struct xs_handle *)handle;
    fd_pre = get_fd_from_handle(head);
    /* retry when the write to xenstore fails */
    do
    {
        ret = xs_write(head, XBT_NULL, path, &buf[0], strlen(buf));
        if(1 == ret)
        {
            break;
        }
        (void)usleep(300000);
        retry_times++;
    } while(retry_times < 3);

    if(ret != 1)
    {
        fd_aft = get_fd_from_handle(head);
        ERR_LOG("Write %s %s failed, errno is %d, fd_pre is %d, fd_aft is %d.",
            path, buf, errno, fd_pre, fd_aft);
        return;
    }
    else
        return;
}

/*****************************************************************************
 Function    : read_from_xenstore
 Description : read from xenstore
 Input       : handle -- xenstore handle
               path   -- the xenstore path
 Output      : None
 Return      : buf -- the value read from the given xenstore path
*****************************************************************************/
char *read_from_xenstore (void *handle, char *path)
{
    unsigned int len;
    char *buf = NULL;
    struct xs_handle *head;
    int fd_pre = -1, fd_aft = -1;

    if(NULL == handle || NULL == path)
    {
        return NULL;
    }

    head = (struct xs_handle *)handle;
    fd_pre = get_fd_from_handle(head);
    buf = (char *)xs_read(head, XBT_NULL, path, &len);
    fd_aft = get_fd_from_handle(head);
    if(buf == NULL && (errno != 2))
    {
        ERR_LOG("Read %s failed, errno is %d, fd_pre is %d, fd_aft is %d.",
            path, errno, fd_pre, fd_aft);
        if(fd_aft < 0)
        {
            exit(1);
        }
    }

    return buf;
}

/*****************************************************************************
 Function    : regwatch
 Description : register a watch with xenstore
 Input       : handle -- xenstore handle
               path   -- the xenstore path
               token  -- token of the watch
 Output      : None
 Return      : true or false
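 */

/*
 * Usage sketch only -- not part of the original sources.  It shows how the
 * wrappers above are typically combined: read_from_xenstore() hands back the
 * malloc'd buffer from xs_read(), which the caller must free, and
 * write_weak_to_xenstore() retries the write up to three times internally.
 * The xenstore path, the value and the function name are invented; the block
 * is deliberately compiled out.
 */
#if 0
static void example_report_state(void *handle)
{
    char *old = read_from_xenstore(handle, "control/uvp/example-state");

    if (old != NULL) {
        /* act on the previous value if needed */
        free(old);
    }

    write_weak_to_xenstore(handle, "control/uvp/example-state", "ready");
}
#endif

/*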
*****************************************************************************/ bool regwatch(void *handle, const char *path, const char *token) { struct xs_handle *head; if (NULL == handle || NULL == path || NULL == token) { return false; } head = (struct xs_handle *)handle; return xs_watch(head, path, token); } /***************************************************************************** Function : openxenstore Description: xenstore Input : None Output : None Return : xenstoreľָ *****************************************************************************/ void *openxenstore(void) { struct xs_handle *h = xs_domain_open(); int fd; int flag; if (h) { fd = get_fd_from_handle(h); flag = fcntl(fd, F_GETFD); if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) == -1) { ERR_LOG("Set FD_CLOEXEC failed! errno[%d].", errno); } } return h; } /***************************************************************************** Function : closexenstore Description: رxenstore Input : handle -- xenstore Output : None Return : None *****************************************************************************/ void closexenstore(void *handle) { struct xs_handle *head; if (NULL == handle) { return; } head = (struct xs_handle *)handle; (void) xs_unwatch(head, UVP_PATH , "uvptoken"); /* ΪǨƺ󴥷һܻȡ */ (void) xs_unwatch(head, SERVICE_FLAG_WATCH_PATH , "migtoken"); xs_daemon_close(head); return; } /***************************************************************************** Function : getxsfileno Description: ȡxenstoreļ Input : handle -- xenstore Output : None Return : ļfd *****************************************************************************/ int getxsfileno(void *handle) { struct xs_handle *head; if (NULL == handle) { return -1; } head = (struct xs_handle *)handle; return xs_fileno(head); } /***************************************************************************** Function : readWatch Description: ȡwatch· Input : None Output : None Return : watch· *****************************************************************************/ char **readWatch(void *handle) { unsigned int num; struct xs_handle *head; if (NULL == handle) { return NULL; } head = (struct xs_handle *)handle; return xs_read_watch(head, &num); } UVP-Tools-2.2.0.316/uvp-monitor/xenstore_dev_xen_xenbus_enable.diff000066400000000000000000000257711314037446600252210ustar00rootroot00000000000000diff -uraN xen-4.1.2.orig/tools/xenstore/Makefile xen-4.1.2/tools/xenstore/Makefile --- xen-4.1.2.orig/tools/xenstore/Makefile 2011-10-21 01:05:46.000000000 +0800 +++ xen-4.1.2/tools/xenstore/Makefile 2016-05-05 11:14:42.000000000 +0800 @@ -1,117 +1,17 @@ -XEN_ROOT=$(CURDIR)/../.. -include $(XEN_ROOT)/tools/Rules.mk +all: + gcc -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xenstore_client.o.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -c -o xenstore_client.o xenstore_client.c + gcc -DPIC -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xs.opic.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. 
-DUSE_PTHREAD -fPIC -c -o xs.opic xs.c + gcc -DPIC -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xs_lib.opic.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -fPIC -c -o xs_lib.opic xs_lib.c + gcc -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .libxenstore.so.3.0.0.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -Wl,--no-as-needed -Wl,-soname -Wl,libxenstore.so.3.0 -shared -o libxenstore.so.3.0.0 xs.opic xs_lib.opic -lpthread + ar rcs libxenstore.a xs.opic xs_lib.opic + cp -f libxenstore.a ../../.. + ln -sf libxenstore.so.3.0.0 libxenstore.so.3.0 + ln -sf libxenstore.so.3.0 libxenstore.so + gcc -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xenstore.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -Wl,--no-as-needed xenstore_client.o -L. -lxenstore -o xenstore + gcc -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xenstore_control.o.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -c -o xenstore_control.o xenstore_control.c + gcc -O2 -fomit-frame-pointer -fno-strict-aliasing -std=gnu99 -Wall -Wstrict-prototypes -falign-functions=32 -Wno-unused-value -Wdeclaration-after-statement -D__XEN_TOOLS__ -MMD -MF .xenstore-control.d -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -Werror -g -O0 -I. -Wl,--no-as-needed xenstore_control.o -L. -lxenstore -o xenstore-control -MAJOR = 3.0 -MINOR = 0 - -CFLAGS += -Werror -CFLAGS += -I. -CFLAGS += $(CFLAGS_libxenctrl) - -CLIENTS := xenstore-exists xenstore-list xenstore-read xenstore-rm xenstore-chmod -CLIENTS += xenstore-write xenstore-ls xenstore-watch - -XENSTORED_OBJS = xenstored_core.o xenstored_watch.o xenstored_domain.o xenstored_transaction.o xs_lib.o talloc.o utils.o tdb.o hashtable.o - -XENSTORED_OBJS_$(CONFIG_Linux) = xenstored_linux.o -XENSTORED_OBJS_$(CONFIG_SunOS) = xenstored_solaris.o xenstored_probes.o -XENSTORED_OBJS_$(CONFIG_NetBSD) = xenstored_netbsd.o - -XENSTORED_OBJS += $(XENSTORED_OBJS_y) - -ifneq ($(XENSTORE_STATIC_CLIENTS),y) -LIBXENSTORE := libxenstore.so -else -LIBXENSTORE := libxenstore.a -xenstore xenstore-control: CFLAGS += -static -endif - -ALL_TARGETS = libxenstore.so libxenstore.a clients xs_tdb_dump xenstored - -.PHONY: all -all: $(ALL_TARGETS) - -.PHONY: clients -clients: xenstore $(CLIENTS) xenstore-control - -ifeq ($(CONFIG_SunOS),y) -xenstored_probes.h: xenstored_probes.d - dtrace -C -h -s xenstored_probes.d - -xenstored_solaris.o: xenstored_probes.h - -xenstored_probes.o: xenstored_solaris.o - dtrace -C -G -s xenstored_probes.d xenstored_solaris.o - -CFLAGS += -DHAVE_DTRACE=1 -endif - -xenstored: $(XENSTORED_OBJS) - $(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS_libxenctrl) $(SOCKET_LIBS) -o $@ - -$(CLIENTS): xenstore - ln -f xenstore $@ - -xenstore: xenstore_client.o $(LIBXENSTORE) - $(CC) $(CFLAGS) $(LDFLAGS) $< -L. -lxenstore $(SOCKET_LIBS) -o $@ - -xenstore-control: xenstore_control.o $(LIBXENSTORE) - $(CC) $(CFLAGS) $(LDFLAGS) $< -L. 
-lxenstore $(SOCKET_LIBS) -o $@ - -xs_tdb_dump: xs_tdb_dump.o utils.o tdb.o talloc.o - $(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ - -libxenstore.so: libxenstore.so.$(MAJOR) - ln -sf $< $@ -libxenstore.so.$(MAJOR): libxenstore.so.$(MAJOR).$(MINOR) - ln -sf $< $@ - -xs.opic: CFLAGS += -DUSE_PTHREAD - -libxenstore.so.$(MAJOR).$(MINOR): xs.opic xs_lib.opic - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,$(SONAME_LDFLAG) -Wl,libxenstore.so.$(MAJOR) $(SHLIB_LDFLAGS) -o $@ $^ $(SOCKET_LIBS) -lpthread - -libxenstore.a: xs.o xs_lib.o - $(AR) rcs $@ $^ - -.PHONY: clean clean: rm -f *.a *.o *.opic *.so* xenstored_probes.h rm -f xenstored xs_random xs_stress xs_crashme rm -f xs_tdb_dump xenstore-control - rm -f xenstore $(CLIENTS) - $(RM) $(DEPS) - -.PHONY: TAGS -TAGS: - etags `find . -name '*.[ch]'` - -.PHONY: tarball -tarball: clean - cd .. && tar -c -j -v -h -f xenstore.tar.bz2 xenstore/ - -.PHONY: install -install: all - $(INSTALL_DIR) $(DESTDIR)$(BINDIR) - $(INSTALL_DIR) $(DESTDIR)$(SBINDIR) - $(INSTALL_DIR) $(DESTDIR)$(INCLUDEDIR) - $(INSTALL_DIR) $(DESTDIR)/var/run/xenstored - $(INSTALL_DIR) $(DESTDIR)/var/lib/xenstored - $(INSTALL_PROG) xenstored $(DESTDIR)$(SBINDIR) - $(INSTALL_PROG) xenstore-control $(DESTDIR)$(BINDIR) - $(INSTALL_PROG) xenstore $(DESTDIR)$(BINDIR) - set -e ; for c in $(CLIENTS) ; do \ - ln -f $(DESTDIR)$(BINDIR)/xenstore $(DESTDIR)$(BINDIR)/$${c} ; \ - done - $(INSTALL_DIR) $(DESTDIR)$(LIBDIR) - $(INSTALL_PROG) libxenstore.so.$(MAJOR).$(MINOR) $(DESTDIR)$(LIBDIR) - ln -sf libxenstore.so.$(MAJOR).$(MINOR) $(DESTDIR)$(LIBDIR)/libxenstore.so.$(MAJOR) - ln -sf libxenstore.so.$(MAJOR) $(DESTDIR)$(LIBDIR)/libxenstore.so - $(INSTALL_DATA) libxenstore.a $(DESTDIR)$(LIBDIR) - $(INSTALL_DATA) xs.h $(DESTDIR)$(INCLUDEDIR) - $(INSTALL_DATA) xs_lib.h $(DESTDIR)$(INCLUDEDIR) - --include $(DEPS) - -# never delete any intermediate files. -.SECONDARY: + rm -f xenstore xenstore-exists xenstore-list xenstore-read xenstore-rm xenstore-chmod xenstore-write xenstore-ls xenstore-watch + rm -f .*.d diff -uraN xen-4.1.2.orig/tools/xenstore/xs_lib.c xen-4.1.2/tools/xenstore/xs_lib.c --- xen-4.1.2.orig/tools/xenstore/xs_lib.c 2011-10-21 01:05:46.000000000 +0800 +++ xen-4.1.2/tools/xenstore/xs_lib.c 2016-05-05 11:19:35.000000000 +0800 @@ -81,6 +81,8 @@ return s; #if defined(__linux__) + if (access("/dev/xen/xenbus", F_OK) == 0) + return "/dev/xen/xenbus"; return "/proc/xen/xenbus"; #elif defined(__NetBSD__) return "/kern/xen/xenbus"; diff -uraN xen-4.1.2.orig/tools/xenstore/xs_lib.h xen-4.1.2/tools/xenstore/xs_lib.h --- xen-4.1.2.orig/tools/xenstore/xs_lib.h 2011-10-21 01:05:46.000000000 +0800 +++ xen-4.1.2/tools/xenstore/xs_lib.h 2016-05-05 11:14:42.000000000 +0800 @@ -24,7 +24,7 @@ #include #include #include -#include +#include "xs_wire.h" /* Bitmask of permissions. */ enum xs_perm_type { diff -uraN xen-4.1.2.orig/tools/xenstore/xs_wire.h xen-4.1.2/tools/xenstore/xs_wire.h --- xen-4.1.2.orig/tools/xenstore/xs_wire.h 1970-01-01 08:00:00.000000000 +0800 +++ xen-4.1.2/tools/xenstore/xs_wire.h 2016-05-05 11:14:42.000000000 +0800 @@ -0,0 +1,133 @@ +/* + * Details of the "wire" protocol between Xen Store Daemon and client + * library or guest kernel. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Copyright (C) 2005 Rusty Russell IBM Corporation + */ + +#ifndef _XS_WIRE_H +#define _XS_WIRE_H + +enum xsd_sockmsg_type +{ + XS_DEBUG, + XS_DIRECTORY, + XS_READ, + XS_GET_PERMS, + XS_WATCH, + XS_UNWATCH, + XS_TRANSACTION_START, + XS_TRANSACTION_END, + XS_INTRODUCE, + XS_RELEASE, + XS_GET_DOMAIN_PATH, + XS_WRITE, + XS_MKDIR, + XS_RM, + XS_SET_PERMS, + XS_WATCH_EVENT, + XS_ERROR, + XS_IS_DOMAIN_INTRODUCED, + XS_RESUME, + XS_SET_TARGET, + XS_RESTRICT +}; + +#define XS_WRITE_NONE "NONE" +#define XS_WRITE_CREATE "CREATE" +#define XS_WRITE_CREATE_EXCL "CREATE|EXCL" + +/* We hand errors as strings, for portability. */ +struct xsd_errors +{ + int errnum; + const char *errstring; +}; +#ifdef EINVAL +#define XSD_ERROR(x) { x, #x } +/* LINTED: static unused */ +static struct xsd_errors xsd_errors[] +#if defined(__GNUC__) +__attribute__((unused)) +#endif + = { + XSD_ERROR(EINVAL), + XSD_ERROR(EACCES), + XSD_ERROR(EEXIST), + XSD_ERROR(EISDIR), + XSD_ERROR(ENOENT), + XSD_ERROR(ENOMEM), + XSD_ERROR(ENOSPC), + XSD_ERROR(EIO), + XSD_ERROR(ENOTEMPTY), + XSD_ERROR(ENOSYS), + XSD_ERROR(EROFS), + XSD_ERROR(EBUSY), + XSD_ERROR(EAGAIN), + XSD_ERROR(EISCONN) +}; +#endif + +struct xsd_sockmsg +{ + uint32_t type; /* XS_??? */ + uint32_t req_id;/* Request identifier, echoed in daemon's response. */ + uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ + uint32_t len; /* Length of data following this. */ + + /* Generally followed by nul-terminated string(s). */ +}; + +enum xs_watch_type +{ + XS_WATCH_PATH = 0, + XS_WATCH_TOKEN +}; + +/* Inter-domain shared memory communications. */ +#define XENSTORE_RING_SIZE 1024 +typedef uint32_t XENSTORE_RING_IDX; +#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) +struct xenstore_domain_interface { + char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ + char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ + XENSTORE_RING_IDX req_cons, req_prod; + XENSTORE_RING_IDX rsp_cons, rsp_prod; +}; + +/* Violating this is very bad. See docs/misc/xenstore.txt. 
*/ +#define XENSTORE_PAYLOAD_MAX 4096 + +/* Violating these just gets you an error back */ +#define XENSTORE_ABS_PATH_MAX 3072 +#define XENSTORE_REL_PATH_MAX 2048 + +#endif /* _XS_WIRE_H */ + +/* + * Local variables: + * mode: C + * c-set-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ UVP-Tools-2.2.0.316/uvp-xenpv/000077500000000000000000000000001314037446600155535ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/000077500000000000000000000000001314037446600170575ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/000077500000000000000000000000001314037446600201625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/Makefile000066400000000000000000000017341314037446600216270ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Modify : songjiangtao 00202546 ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) COMPILEFLAGS=$(CROSSCOMPILE) obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all: make -C $(KERNDIR) M=$(M) modules $(COMPILEFLAGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEFLAGS) clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEFLAGS) rm -rf Modules.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/000077500000000000000000000000001314037446600230665ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/asm-generic/000077500000000000000000000000001314037446600252605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/asm-generic/pgtable-nopmd.h000066400000000000000000000005611314037446600301640ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/asm-generic/pgtable-nopud.h000066400000000000000000000006211314037446600301710ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/linux/000077500000000000000000000000001314037446600242255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/linux/io.h000066400000000000000000000002771314037446600250130ustar00rootroot00000000000000#ifndef _LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/linux/mutex.h000066400000000000000000000015411314037446600255410ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. 
All rights reserved. * * This file is released under the GPLv2. */ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/linux/scatterlist.h000066400000000000000000000003761314037446600267450ustar00rootroot00000000000000#ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) #error "This version of Linux should not need compat linux/scatterlist.h" #endif #include #endif /* _LINUX_SCATTERLIST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/xen/000077500000000000000000000000001314037446600236605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/compat-include/xen/platform-compat.h000066400000000000000000000126561314037446600271500ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
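 */

/*
 * Brief sketch, not part of the original sources.  It illustrates why the
 * compat-include/linux/mutex.h shim above exists: driver code written against
 * the 2.6.16+ mutex API still builds on older kernels because the shim maps
 * mutexes onto semaphores.  The lock and function names are invented and the
 * block is deliberately compiled out.
 */
#if 0
#include <linux/mutex.h>        /* resolves to the compat shim on pre-2.6.16 kernels */

static DEFINE_MUTEX(example_lock);

static void example_critical_section(void)
{
    mutex_lock(&example_lock);   /* down(&example_lock) on pre-2.6.16 kernels */
    /* ... touch shared state ... */
    mutex_unlock(&example_lock); /* up(&example_lock) */
}
#endif

/*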
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) #if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) #define setup_xen_features_uvp xen_setup_features #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/config.mk000066400000000000000000000013101314037446600217530ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif #_XEN_CPPFLAGS += -include $(objtree)/include/linux/autoconf.h EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/000077500000000000000000000000001314037446600216055ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-i386/000077500000000000000000000000001314037446600230545ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-i386/README000066400000000000000000000000001314037446600237220ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/000077500000000000000000000000001314037446600231555ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/gnttab_dma.h000066400000000000000000000026471314037446600254370ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
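 */

/*
 * Sketch for orientation, not part of the original sources.  config.mk above
 * adds -I$(M)/compat-include and -DHAVE_XEN_PLATFORM_COMPAT_H to the out-of-
 * tree build, and the PV driver sources conventionally pull the shims in
 * behind that guard, so the same source also builds where the compat headers
 * are absent.  The fragment is deliberately compiled out here.
 */
#if 0
#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif
#endif

/*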
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/hypercall.h000066400000000000000000000250771314037446600253240ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
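 */

/*
 * Caller sketch, not part of the original sources.  It shows a typical user
 * of the hypercall wrappers defined later in this header, assuming the usual
 * xen/interface/version.h convention that XENVER_version with a NULL argument
 * returns the hypervisor version packed as (major << 16) | minor.  The
 * function name is invented and the block is deliberately compiled out.
 */
#if 0
static inline void example_log_xen_version(void)
{
    int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

    if (ver >= 0)
        printk(KERN_INFO "running on Xen %d.%d\n", ver >> 16, ver & 0xffff);
}
#endif

/*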
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #if CONFIG_XEN_COMPAT <= 0x030002 # include /* memcpy() */ #endif #ifdef CONFIG_XEN_no #define HYPERCALL_ASM_OPERAND "%c" #define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) #define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #else #define HYPERCALL_ASM_OPERAND "*%" #define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) #define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #endif #define HYPERCALL_ARG(arg, n) \ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "1" \ : "=a" (__res) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, arg) \ ({ \ type __res; \ HYPERCALL_ARG(arg, 1); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "2" \ : "=a" (__res), "+r" (__arg1) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "3" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "4" \ : "=a" (__res), "+r" (__arg1), \ "+r" (__arg2), "+r" (__arg3) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "5" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall(type, op, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call *%6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : "g" (HYPERCALL_LOCATION(op)) \ : "memory" ); \ __res; \ }) #ifdef CONFIG_X86_32 # include "hypercall_32.h" #else # include "hypercall_64.h" #endif static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmu_update(req, count, success_count, domid); return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned 
int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmuext_op(op, count, success_count, domid); return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } #endif static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); /* when hypercall failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { bool fixup = false; int rc; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); #ifdef GNTTABOP_map_grant_ref if (cmd == GNTTABOP_map_grant_ref) #endif fixup = gnttab_pre_map_adjust(cmd, uop, count); rc = _hypercall3(int, grant_table_op, cmd, uop, count); if (rc == 0 && fixup) rc = gnttab_post_map_adjust(uop, count); return rc; } static inline int 
__must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN_no static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/hypercall_32.h000066400000000000000000000031401314037446600256130ustar00rootroot00000000000000#define HYPERCALL_arg1 "ebx" #define HYPERCALL_arg2 "ecx" #define HYPERCALL_arg3 "edx" #define HYPERCALL_arg4 "esi" #define HYPERCALL_arg5 "edi" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall2(long, set_timer_op, (unsigned long)timeout, (unsigned long)(timeout>>32)); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, (unsigned long)ma, (unsigned long)(ma>>32), (unsigned long)desc, (unsigned long)(desc>>32)); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/hypercall_64.h000066400000000000000000000025661314037446600256330ustar00rootroot00000000000000#define HYPERCALL_arg1 "rdi" #define HYPERCALL_arg2 "rsi" #define HYPERCALL_arg3 "rdx" #define HYPERCALL_arg4 "r10" #define HYPERCALL_arg5 "r8" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long 
failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/hypervisor.h000066400000000000000000000235621314037446600255500ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
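 */

/*
 * Batching sketch, not part of the original sources.  It shows how the
 * MULTI_update_va_mapping()/HYPERVISOR_multicall() helpers defined later in
 * this header are meant to be combined: several page-table updates are queued
 * into one multicall_entry_t array and submitted with a single hypercall.
 * The function name is invented and UVMF_INVLPG is assumed from
 * xen/interface/xen.h; the block is deliberately compiled out.
 */
#if 0
static inline int example_update_two_mappings(unsigned long va1, pte_t pte1,
                                              unsigned long va2, pte_t pte2)
{
    multicall_entry_t mcl[2];

    MULTI_update_va_mapping(&mcl[0], va1, pte1, UVMF_INVLPG);
    MULTI_update_va_mapping(&mcl[1], va2, pte2, UVMF_INVLPG);

    return HYPERVISOR_multicall(mcl, 2);
}
#endif

/*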
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(pgd_t *); void xen_new_user_pt(pgd_t *); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(pgd_t *); void xen_pgd_unpin(pgd_t *); void xen_init_pgd_pin(void); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES_uvp void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #ifdef CONFIG_XEN_no DECLARE_PER_CPU(bool, xen_lazy_mmu); int xen_multicall_flush(bool); int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, unsigned long flags); int __must_check xen_multi_mmu_update(mmu_update_t *, unsigned int count, unsigned int *success_count, domid_t); int __must_check xen_multi_mmuext_op(struct mmuext_op *, unsigned int count, unsigned int *success_count, domid_t); #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = true; } static inline void arch_leave_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = false; xen_multicall_flush(false); } #if defined(CONFIG_X86_32) #define arch_use_lazy_mmu_mode() unlikely(x86_read_percpu(xen_lazy_mmu)) #elif !defined(arch_use_lazy_mmu_mode) #define arch_use_lazy_mmu_mode() unlikely(__get_cpu_var(xen_lazy_mmu)) #endif #if 0 /* All uses are in places potentially called asynchronously, but * asynchronous code should rather not make use of lazy mode at all. 
* Therefore, all uses of this function get commented out, proper * detection of asynchronous invocations is added whereever needed, * and this function is disabled to catch any new (improper) uses. */ static inline void arch_flush_lazy_mmu_mode(void) { if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); } #endif struct gnttab_map_grant_ref; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *, unsigned int count); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *, unsigned int); #else static inline int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *m, unsigned int count) { BUG(); return -ENOSYS; } #endif #else /* CONFIG_XEN_no */ static inline void xen_multicall_flush(bool ignore) {} #define arch_use_lazy_mmu_mode() false #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmuext_op(...) ({ BUG(); -ENOSYS; }) #define gnttab_pre_map_adjust(...) false #define gnttab_post_map_adjust(...) ({ BUG(); -ENOSYS; }) #endif /* CONFIG_XEN_no */ #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN_no #define is_running_on_xen() 1 extern char hypercall_page[PAGE_SIZE]; #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif #include static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void __noreturn HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. 
*/ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int __must_check HYPERVISOR_poll_no_timeout( evtchn_port_t *ports, unsigned int nr_ports) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN_no static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)req; mcl->args[1] = count; mcl->args[2] = (unsigned long)success_count; mcl->args[3] = domid; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #ifdef LINUX /* drivers/staging/rt28?0/ use Windows-style types, including VOID */ #undef VOID #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/maddr.h000066400000000000000000000001201314037446600244060ustar00rootroot00000000000000#ifdef CONFIG_X86_32 # include "maddr_32.h" #else # include "maddr_64.h" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/maddr_32.h000066400000000000000000000126251314037446600247270ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN_no extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
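 *
 * Worked example: a driver that maps a foreign MFN m at its own pfn p
 * records the mapping with
 *	set_phys_to_machine(p, FOREIGN_FRAME(m));
 * pfn_to_mfn(p) still yields m, because FOREIGN_FRAME_BIT is masked off,
 * while mfn_to_local_pfn(m) will not find p2m(m2p(m)) == m and therefore
 * returns max_mapnr, forcing !pfn_valid() exactly as described above.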
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else #define pte_phys_to_machine phys_to_machine #define pte_machine_to_phys machine_to_phys #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/maddr_64.h000066400000000000000000000115551314037446600247350ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN_no extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { printk("cjm---not form macro"); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/asm-xen/synch_bitops.h000066400000000000000000000062311314037446600260340ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/000077500000000000000000000000001314037446600223775ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/balloon.h000066400000000000000000000052711314037446600242030ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_H__ #define __XEN_BALLOON_H__ #include #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance_uvp_uvp(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec_uvp(int nr_pages); void free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages); /* Free an empty page range (not allocated through alloc_empty_pages_and_pagevec_uvp), adding to the balloon. */ void free_empty_pages(struct page **pagevec, int nr_pages); void balloon_release_driver_page_uvp_uvp(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif #endif /* __XEN_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/blkif.h000066400000000000000000000112031314037446600236340ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? 
*/ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/compat_ioctl.h000066400000000000000000000030411314037446600252230ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 
2007 * * Authors: Jimi Xenidis * Hollis Blanchard */ #ifndef __LINUX_XEN_COMPAT_H__ #define __LINUX_XEN_COMPAT_H__ #include extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg); struct privcmd_mmap_32 { int num; domid_t dom; compat_uptr_t entry; }; struct privcmd_mmapbatch_32 { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ compat_uptr_t arr; /* array of mfns - top nibble set on err */ }; #define IOCTL_PRIVCMD_MMAP_32 \ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32)) #define IOCTL_PRIVCMD_MMAPBATCH_32 \ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32)) #endif /* __LINUX_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/cpu_hotplug.h000066400000000000000000000014751314037446600251100ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/driver_util.h000066400000000000000000000003161314037446600251000ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/events.h000066400000000000000000000032311314037446600240530ustar00rootroot00000000000000#ifndef _XEN_EVENTS_H #define _XEN_EVENTS_H #include #include #include #include int bind_evtchn_to_irq(unsigned int evtchn); int bind_evtchn_to_irqhandler(unsigned int evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (even for bindings * made with bind_evtchn_to_irqhandler()). */ void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); int resend_irq_on_evtchn(unsigned int irq); void rebind_evtchn_irq(int evtchn, int irq); static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } extern void notify_remote_via_irq_uvp(int irq); extern void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. 
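   A minimal illustrative sequence, assuming the caller re-checks its own
   wake-up condition (condition_ready() here is a hypothetical helper, not
   part of this header):
	xen_clear_irq_pending(irq);
	if (!condition_ready())
		xen_poll_irq(irq);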
*/ void xen_poll_irq(int irq); #endif /* _XEN_EVENTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/evtchn.h000066400000000000000000000161571314037446600240510ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. 
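 *
 * A hedged usage sketch (my_handler, my_dev, backend_domid and remote_port
 * are hypothetical caller-side names, not part of this header): a frontend
 * that has learned the backend's domain id and event-channel port could do
 *	irq = bind_interdomain_evtchn_to_irqhandler(backend_domid, remote_port,
 *						    my_handler, 0, "my-dev",
 *						    my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler_uvp(irq, my_dev);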
*/ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN_no) && defined(CONFIG_X86) int bind_virq_to_irqaction( unsigned int virq, unsigned int cpu, struct irqaction *action); #else #define bind_virq_to_irqaction(virq, cpu, action) \ bind_virq_to_irqhandler(virq, cpu, (action)->handler, \ (action)->flags | IRQF_NOBALANCING, \ (action)->name, action) #endif #if defined(CONFIG_SMP) && !defined(MODULE) #ifndef CONFIG_X86 int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #else int bind_ipi_to_irqaction( unsigned int ipi, unsigned int cpu, struct irqaction *action); #endif #endif /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN_no) && defined(CONFIG_X86) /* Specialized unbind function for per-CPU IRQs. */ void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, struct irqaction *); #else #define unbind_from_per_cpu_irq(irq, cpu, action) \ unbind_from_irqhandler_uvp(irq, action) #endif #ifndef CONFIG_XEN_no void irq_resume(void); #endif /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void set_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_set_bit(port, s->evtchn_pending); } static inline int test_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } static inline void multi_notify_remote_via_evtchn(multicall_entry_t *mcl, int port) { struct evtchn_send *send = (void *)(mcl->args + 2); BUILD_BUG_ON(sizeof(*send) > sizeof(mcl->args) - 2 * sizeof(*mcl->args)); send->port = port; mcl->op = __HYPERVISOR_event_channel_op; mcl->args[0] = EVTCHNOP_send; mcl->args[1] = (unsigned long)send; } /* Clear an irq's pending state, in preparation for polling on it. */ void xen_clear_irq_pending(int irq); /* Set an irq's pending state, to avoid blocking on it. */ void xen_set_irq_pending(int irq); /* Test an irq's pending state. */ int xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq_uvp(int irq); int multi_notify_remote_via_irq_uvp(multicall_entry_t *, int irq); int irq_to_evtchn_port(int irq); #define PIRQ_SET_MAPPING 0x0 #define PIRQ_CLEAR_MAPPING 0x1 #define PIRQ_GET_MAPPING 0x3 int pirq_mapstatus(int pirq, int action); int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action)); int clear_pirq_hw_action(int pirq); #define PIRQ_STARTUP 1 #define PIRQ_SHUTDOWN 2 #define PIRQ_ENABLE 3 #define PIRQ_DISABLE 4 #define PIRQ_END 5 #define PIRQ_ACK 6 #if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); #endif #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/features.h000066400000000000000000000007661314037446600243770ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. 
* * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_FEATURES_H__ #define __XEN_FEATURES_H__ #include #include void xen_setup_features(void); extern u8 xen_features_uvp[XENFEAT_NR_SUBMAPS * 32]; static inline int xen_feature(int flag) { return xen_features_uvp[flag]; } #endif /* __XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/firmware.h000066400000000000000000000003011314037446600243560ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) void copy_edd(void); #endif void copy_edid(void); #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/gnttab.h000066400000000000000000000125751314037446600240410ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
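 *
 * Illustrative lifecycle sketch (backend_domid and shared_page are
 * hypothetical caller variables): a frontend typically grants a frame,
 * passes the reference to the backend, and revokes it once I/O completes:
 *	ref = gnttab_grant_foreign_access_uvp(backend_domid,
 *					      virt_to_mfn(shared_page), 0);
 *	... hand ref to the backend, wait for the I/O to finish ...
 *	gnttab_end_foreign_access_uvp(ref, 0);
 * Passing 0 as the page argument means no page is freed, as noted above.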
*/ void gnttab_end_foreign_access_uvp(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference_uvp(grant_ref_t ref); void gnttab_free_grant_reference_uvps(grant_ref_t head); int gnttab_empty_grant_references_uvp(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference_uvp(grant_ref_t *pprivate_head); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page_uvp(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page_uvp(struct page *page); #ifndef CONFIG_XEN_no int gnttab_resume(void); #endif void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/grant_table.h000066400000000000000000000105641314037446600250400ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. 
* (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_suspend(void); int gnttab_resume(void); int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
*/ void gnttab_end_foreign_access_uvp(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference_uvp(grant_ref_t ref); void gnttab_free_grant_reference_uvps(grant_ref_t head); int gnttab_empty_grant_references_uvp(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference_uvp(grant_ref_t *pprivate_head); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t, domid_t domid, unsigned long pfn); int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared); void arch_gnttab_unmap_shared(struct grant_entry *shared, unsigned long nr_gframes); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/hvc-console.h000066400000000000000000000007051314037446600247720ustar00rootroot00000000000000#ifndef XEN_HVC_CONSOLE_H #define XEN_HVC_CONSOLE_H extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN_uvp void xen_console_resume(void); void xen_raw_console_write(const char *str); void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } static inline void xen_raw_printk(const char *fmt, ...) { } #endif #endif /* XEN_HVC_CONSOLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/hvm.h000066400000000000000000000007101314037446600233400ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/hypercall.h000066400000000000000000000014011314037446600245270ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? 
*rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/hypervisor_sysfs.h000066400000000000000000000015051314037446600262120ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/000077500000000000000000000000001314037446600243375ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/COPYING000066400000000000000000000032641314037446600253770ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/000077500000000000000000000000001314037446600256775ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/cpuid.h000066400000000000000000000050721314037446600271600ustar00rootroot00000000000000/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/hvm/000077500000000000000000000000001314037446600264715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/hvm/save.h000066400000000000000000000250301314037446600276000ustar00rootroot00000000000000/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. */ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t gtsc_khz; /* Guest's TSC frequency in kHz */ }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint64_t sysenter_cs; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; uint64_t msr_tsc_aux; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. 
*/ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { uint8_t data[1024]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. 
*/ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * Viridian hypervisor context. */ struct hvm_viridian_context { uint64_t hypercall_gpa; uint64_t guest_os_id; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 15 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/xen-mca.h000066400000000000000000000342111314037446600274010ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. 
* Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage Senario: After offlining broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easy broken page earlier when doing next reboot. 
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t _pad0; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. 
*/ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/xen-x86_32.h000066400000000000000000000145401314037446600275750ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/xen-x86_64.h000066400000000000000000000157351314037446600276110ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. 
* All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86/xen.h000066400000000000000000000171711314037446600266510ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #include "../xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif /* Allow co-existing Linux 2.6.23+ Xen interface definitions. */ #define DEFINE_GUEST_HANDLE_STRUCT(name) struct name #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in legacy multi-processor guests. */ #define XEN_LEGACY_MAX_VCPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86_32.h000066400000000000000000000024131314037446600263540ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/arch-x86_64.h000066400000000000000000000024131314037446600263610ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/callback.h000066400000000000000000000074531314037446600262550ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/dom0_ops.h000066400000000000000000000077061314037446600262420ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/domctl.h000066400000000000000000001004501314037446600257720ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "grant_table.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000006 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ /* XEN_DOMCTL_createdomain */ struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) /* Use hardware-assisted paging if available? */ #define _XEN_DOMCTL_CDF_hap 1 #define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) /* Should domain memory integrity be verifed by tboot during Sx? */ #define _XEN_DOMCTL_CDF_s3_integrity 2 #define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) uint32_t flags; /* Disable out-of-sync shadow page tables? 
*/ #define _XEN_DOMCTL_CDF_oos_off 3 #define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off) }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); /* XEN_DOMCTL_getdomaininfo */ struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shr_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); /* XEN_DOMCTL_getmemlist */ struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); /* XEN_DOMCTL_getpageframeinfo */ #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28) #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); /* XEN_DOMCTL_getpageframeinfo2 */ struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ /* XEN_DOMCTL_shadow_op */ /* Disable shadow mode. 
*/ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); /* XEN_DOMCTL_max_mem */ struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); /* XEN_DOMCTL_setvcpucontext */ /* XEN_DOMCTL_getvcpucontext */ struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); /* XEN_DOMCTL_getvcpuinfo */ struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? */ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. 
*/ /* XEN_DOMCTL_setvcpuaffinity */ /* XEN_DOMCTL_getvcpuaffinity */ struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); /* XEN_DOMCTL_max_vcpus */ struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); /* XEN_DOMCTL_scheduler_op */ /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); /* XEN_DOMCTL_setdomainhandle */ struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); /* XEN_DOMCTL_setdebugging */ struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); /* XEN_DOMCTL_irq_permission */ struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); /* XEN_DOMCTL_iomem_permission */ struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); /* XEN_DOMCTL_ioport_permission */ struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? */ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); /* XEN_DOMCTL_hypercall_init */ struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); /* XEN_DOMCTL_arch_setup */ #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) #define _XEN_DOMAINSETUP_sioemu_guest 2 #define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. 
*/ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); /* XEN_DOMCTL_settimeoffset */ struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); /* XEN_DOMCTL_gethvmcontext */ /* XEN_DOMCTL_sethvmcontext */ typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); /* XEN_DOMCTL_set_address_size */ /* XEN_DOMCTL_get_address_size */ typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); /* XEN_DOMCTL_real_mode_area */ struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); /* XEN_DOMCTL_sendtrigger */ #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 #define XEN_DOMCTL_SENDTRIGGER_POWER 3 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. */ /* XEN_DOMCTL_assign_device */ /* XEN_DOMCTL_test_assign_device */ /* XEN_DOMCTL_deassign_device */ struct xen_domctl_assign_device { uint32_t machine_bdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Retrieve sibling devices infomation of machine_bdf */ /* XEN_DOMCTL_get_device_group */ struct xen_domctl_get_device_group { uint32_t machine_bdf; /* IN */ uint32_t max_sdevs; /* IN */ uint32_t num_sdevs; /* OUT */ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ }; typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); /* Pass-through interrupts: bind real irq -> hvm devfn. */ /* XEN_DOMCTL_bind_pt_irq */ /* XEN_DOMCTL_unbind_pt_irq */ typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA, PT_IRQ_TYPE_MSI, PT_IRQ_TYPE_MSI_TRANSLATE, } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; struct { uint8_t gvec; uint32_t gflags; uint64_aligned_t gtable; } msi; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. 
*/ /* XEN_DOMCTL_memory_mapping */ #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. */ /* XEN_DOMCTL_ioport_mapping */ struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ /* XEN_DOMCTL_pin_mem_cacheattr */ /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); /* XEN_DOMCTL_set_ext_vcpucontext */ /* XEN_DOMCTL_get_ext_vcpucontext */ struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ /* XEN_DOMCTL_set_opt_feature */ struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! 
*/ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); /* * Set the target domain for a domain */ /* XEN_DOMCTL_set_target */ struct xen_domctl_set_target { domid_t target; }; typedef struct xen_domctl_set_target xen_domctl_set_target_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); #if defined(__i386__) || defined(__x86_64__) # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF /* XEN_DOMCTL_set_cpuid */ struct xen_domctl_cpuid { uint32_t input[2]; uint32_t eax; uint32_t ebx; uint32_t ecx; uint32_t edx; }; typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); #endif /* XEN_DOMCTL_subscribe */ struct xen_domctl_subscribe { uint32_t port; /* IN */ }; typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); /* * Define the maximum machine address size which should be allocated * to a guest. */ /* XEN_DOMCTL_set_machine_address_size */ /* XEN_DOMCTL_get_machine_address_size */ /* * Do not inject spurious page faults into this domain. */ /* XEN_DOMCTL_suppress_spurious_page_faults */ /* XEN_DOMCTL_debug_op */ #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 struct xen_domctl_debug_op { uint32_t op; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); /* * Request a particular record from the HVM context */ /* XEN_DOMCTL_gethvmcontext_partial */ typedef struct xen_domctl_hvmcontext_partial { uint32_t type; /* IN: Type of record required */ uint32_t instance; /* IN: Instance of that type */ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ } xen_domctl_hvmcontext_partial_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); /* XEN_DOMCTL_disable_migrate */ typedef struct xen_domctl_disable_migrate { uint32_t disable; /* IN: 1: disable migration and restore */ } xen_domctl_disable_migrate_t; /* XEN_DOMCTL_gettscinfo */ /* XEN_DOMCTL_settscinfo */ struct xen_guest_tsc_info { uint32_t tsc_mode; uint32_t gtsc_khz; uint32_t incarnation; uint32_t pad; uint64_aligned_t elapsed_nsec; }; typedef struct xen_guest_tsc_info xen_guest_tsc_info_t; DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t); typedef struct xen_domctl_tsc_info { XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */ xen_guest_tsc_info_t info; /* IN */ } xen_domctl_tsc_info_t; /* XEN_DOMCTL_gdbsx_guestmemio guest mem io */ struct xen_domctl_gdbsx_memio { /* IN */ uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */ uint64_aligned_t gva; /* guest virtual address */ uint64_aligned_t uva; /* user buffer virtual address */ uint32_t len; /* number of bytes to read/write */ uint8_t gwr; /* 0 = read from guest. 1 = write to guest */ /* OUT */ uint32_t remain; /* bytes remaining to be copied */ }; /* XEN_DOMCTL_gdbsx_pausevcpu */ /* XEN_DOMCTL_gdbsx_unpausevcpu */ struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */ uint32_t vcpu; /* which vcpu */ }; /* XEN_DOMCTL_gdbsx_domstatus */ struct xen_domctl_gdbsx_domstatus { /* OUT */ uint8_t paused; /* is the domain paused */ uint32_t vcpu_id; /* any vcpu in an event? */ uint32_t vcpu_ev; /* if yes, what event? */ }; /* * Memory event operations */ /* XEN_DOMCTL_mem_event_op */ /* Add and remove memory handlers */ #define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0 #define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1 /* * Page memory in and out. 
*/ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING (1 << 0) /* Domain memory paging */ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 struct xen_domctl_mem_event_op { uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ /* OP_ENABLE */ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */ uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */ /* Other OPs */ uint64_aligned_t gfn; /* IN: gfn of page being operated on */ }; typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t); /* * Memory sharing operations */ /* XEN_DOMCTL_mem_sharing_op */ #define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0 #define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1 #define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2 #define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3 #define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7 #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10) #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9) struct xen_domctl_mem_sharing_op { uint8_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ union { uint8_t enable; /* OP_CONTROL */ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to nominate */ uint32_t grant_ref; /* IN: grant ref to nominate */ } u; uint64_aligned_t handle; /* OUT: the handle */ } nominate; struct mem_sharing_op_share { /* OP_SHARE */ uint64_aligned_t source_handle; /* IN: handle to the source page */ uint64_aligned_t client_handle; /* IN: handle to the client page */ } share; struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to debug */ uint64_aligned_t mfn; /* IN: mfn to debug */ grant_ref_t gref; /* IN: gref to debug */ } u; } debug; } u; }; typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t); struct xen_domctl { uint32_t cmd; #define XEN_DOMCTL_createdomain 1 #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_getdomaininfo 5 #define XEN_DOMCTL_getmemlist 6 #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_getpageframeinfo2 8 #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_shadow_op 10 #define XEN_DOMCTL_max_mem 11 #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 #define XEN_DOMCTL_getvcpuinfo 14 #define XEN_DOMCTL_max_vcpus 15 #define XEN_DOMCTL_scheduler_op 16 #define XEN_DOMCTL_setdomainhandle 17 #define XEN_DOMCTL_setdebugging 18 #define XEN_DOMCTL_irq_permission 19 #define XEN_DOMCTL_iomem_permission 20 #define XEN_DOMCTL_ioport_permission 21 #define XEN_DOMCTL_hypercall_init 22 #define XEN_DOMCTL_arch_setup 23 #define XEN_DOMCTL_settimeoffset 24 #define XEN_DOMCTL_getvcpuaffinity 25 #define XEN_DOMCTL_real_mode_area 26 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_subscribe 29 #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_bind_pt_irq 38 #define XEN_DOMCTL_memory_mapping 39 #define XEN_DOMCTL_ioport_mapping 40 #define XEN_DOMCTL_pin_mem_cacheattr 
41 #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 #define XEN_DOMCTL_set_opt_feature 44 #define XEN_DOMCTL_test_assign_device 45 #define XEN_DOMCTL_set_target 46 #define XEN_DOMCTL_deassign_device 47 #define XEN_DOMCTL_unbind_pt_irq 48 #define XEN_DOMCTL_set_cpuid 49 #define XEN_DOMCTL_get_device_group 50 #define XEN_DOMCTL_set_machine_address_size 51 #define XEN_DOMCTL_get_machine_address_size 52 #define XEN_DOMCTL_suppress_spurious_page_faults 53 #define XEN_DOMCTL_debug_op 54 #define XEN_DOMCTL_gethvmcontext_partial 55 #define XEN_DOMCTL_mem_event_op 56 #define XEN_DOMCTL_mem_sharing_op 57 #define XEN_DOMCTL_disable_migrate 58 #define XEN_DOMCTL_gettscinfo 59 #define XEN_DOMCTL_settscinfo 60 #define XEN_DOMCTL_gdbsx_guestmemio 1000 #define XEN_DOMCTL_gdbsx_pausevcpu 1001 #define XEN_DOMCTL_gdbsx_unpausevcpu 1002 #define XEN_DOMCTL_gdbsx_domstatus 1003 uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_disable_migrate disable_migrate; struct xen_domctl_tsc_info tsc_info; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_get_device_group get_device_group; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; struct xen_domctl_set_target set_target; struct xen_domctl_subscribe subscribe; struct xen_domctl_debug_op debug_op; struct xen_domctl_mem_event_op mem_event_op; struct xen_domctl_mem_sharing_op mem_sharing_op; #if defined(__i386__) || defined(__x86_64__) struct xen_domctl_cpuid cpuid; #endif struct xen_domctl_gdbsx_memio gdbsx_guest_memio; struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu; struct xen_domctl_gdbsx_domstatus gdbsx_domstatus; uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ 
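/*
 * Editor's note (illustrative sketch, not part of the original archive):
 * the domctl structures above are normally driven from a privileged
 * toolstack by filling in struct xen_domctl and handing it to the
 * hypervisor through a platform-specific path (on a Linux dom0 this is
 * usually the privcmd ioctl; that path is not part of this header).
 * The sketch below only shows how the fields fit together for
 * XEN_DOMCTL_getdomaininfo. issue_domctl() is a hypothetical stand-in
 * for whatever submission mechanism the surrounding code provides, and
 * XEN_DOMCTL_INTERFACE_VERSION is taken from the full domctl.h; all other
 * names come from the header above.
 */
#include <string.h>

/* Hypothetical submission hook: returns 0 on success, negative on error. */
extern int issue_domctl(xen_domctl_t *domctl);

static int query_domain_info(domid_t domid, xen_domctl_getdomaininfo_t *info)
{
    xen_domctl_t domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_getdomaininfo;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = domid;   /* IN: domain to query */

    if (issue_domctl(&domctl) != 0)
        return -1;

    /* OUT variables are echoed back in the matching union member. */
    *info = domctl.u.getdomaininfo;

    /* Report 1 if the domain is on its way down, 0 otherwise. */
    return (info->flags & XEN_DOMINF_dying) ? 1 : 0;
}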
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/elfnote.h000066400000000000000000000165011314037446600261470ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. 
It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The (non-default) location the initial phys-to-machine map should be * placed at by the hypervisor (Dom0) or the tools (DomU). * The kernel must be prepared for this mapping to be established using * large pages, despite such otherwise not being available to guests. * The kernel must also be able to handle the page table pages used for * this mapping not being accessible through the initial mapping. * (Only x86-64 supports this at present.) */ #define XEN_ELFNOTE_INIT_P2M 15 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_INIT_P2M /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. 
* xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/event_channel.h000066400000000000000000000212271314037446600273250ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. 
Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). 
*/ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/features.h000066400000000000000000000053701314037446600263330ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. 
*/ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/grant_table.h000066400000000000000000000534771314037446600270120ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include "xen.h" /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. 
Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ /* * Version 1 of the grant table entry structure is maintained purely * for backwards compatibility. New guests should use version 2. */ #if __XEN_INTERFACE_VERSION__ < 0x0003020a #define grant_entry_v1 grant_entry #define grant_entry_v1_t grant_entry_t #endif struct grant_entry_v1 { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry_v1 grant_entry_v1_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. * GTF_transitive: Allow @domid to transitively access a subrange of * @trans_grant in @trans_domid. No mappings are allowed. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_transitive (3U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] * GTF_sub_page: Grant access to only a subrange of the page. @domid * will only be allowed to copy from the grant, and not * map it. 
[GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) #define _GTF_sub_page (8) #define GTF_sub_page (1U<<_GTF_sub_page) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /* * Version 2 grant table entries. These fulfil the same role as * version 1 entries, but can represent more complicated operations. * Any given domain will have either a version 1 or a version 2 table, * and every entry in the table will be the same version. * * The interface by which domains use grant references does not depend * on the grant table version in use by the other domain. */ #if __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * Version 1 and version 2 grant entries share a common prefix. The * fields of the prefix are documented as part of struct * grant_entry_v1. */ struct grant_entry_header { uint16_t flags; domid_t domid; }; typedef struct grant_entry_header grant_entry_header_t; /* * Version 2 of the grant entry structure. */ union grant_entry_v2 { grant_entry_header_t hdr; /* * This member is used for V1-style full page grants, where either: * * -- hdr.type is GTF_accept_transfer, or * -- hdr.type is GTF_permit_access and GTF_sub_page is not set. * * In that case, the frame field has the same semantics as the * field of the same name in the V1 entry structure. */ struct { grant_entry_header_t hdr; uint32_t pad0; uint64_t frame; } full_page; /* * If the grant type is GTF_grant_access and GTF_sub_page is set, * @domid is allowed to access bytes [@page_off,@page_off+@length) * in frame @frame. */ struct { grant_entry_header_t hdr; uint16_t page_off; uint16_t length; uint64_t frame; } sub_page; /* * If the grant is GTF_transitive, @domid is allowed to use the * grant @gref in domain @trans_domid, as if it was the local * domain. Obviously, the transitive access must be compatible * with the original grant. * * The current version of Xen does not allow transitive grants * to be mapped. */ struct { grant_entry_header_t hdr; domid_t trans_domid; uint16_t pad0; grant_ref_t gref; } transitive; uint32_t __spacer[4]; /* Pad to a power of two */ }; typedef union grant_entry_v2 grant_entry_v2_t; typedef uint16_t grant_status_t; #endif /* __XEN_INTERFACE_VERSION__ */ /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. 
If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref); typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref); typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table); typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table); typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. 
*/ int16_t status; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer); typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define _GNTCOPY_can_fail (2) #define GNTCOPY_can_fail (1<<_GNTCOPY_can_fail) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy); DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size); typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); #if __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * GNTTABOP_set_version: Request a particular version of the grant * table shared table structure. This operation can only be performed * once in any given domain. It must be performed before any grants * are activated; otherwise, the domain will be stuck with version 1. * The only defined versions are 1 and 2. */ #define GNTTABOP_set_version 8 struct gnttab_set_version { /* IN/OUT parameters */ uint32_t version; }; typedef struct gnttab_set_version gnttab_set_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t); /* * GNTTABOP_get_status_frames: Get the list of frames used to store grant * status for . 
In grant format version 2, the status is separated * from the other shared grant fields to allow more efficient synchronization * using barriers instead of atomic cmpexch operations. * specify the size of vector . * The frame addresses are returned in the . * Only addresses are returned, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_get_status_frames 9 struct gnttab_get_status_frames { /* IN parameters. */ uint32_t nr_frames; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(uint64_t) frame_list; }; typedef struct gnttab_get_status_frames gnttab_get_status_frames_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t); /* * GNTTABOP_get_version: Get the grant table version which is in * effect for domain . */ #define GNTTABOP_get_version 10 struct gnttab_get_version { /* IN parameters */ domid_t dom; uint16_t pad; /* OUT parameters */ uint32_t version; }; typedef struct gnttab_get_version gnttab_get_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t); #endif /* __XEN_INTERFACE_VERSION__ */ /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) #define _GNTMAP_can_fail (5) #define GNTMAP_can_fail (1<<_GNTMAP_can_fail) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTST_eagain (-12) /* Could not map at the moment. Retry. 
*/ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large", \ "could not map at the moment, retry" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/000077500000000000000000000000001314037446600251315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/e820.h000066400000000000000000000030061314037446600257570ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/hvm_info_table.h000066400000000000000000000052611314037446600302620ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) /* Maximum we can support with current vLAPIC ID mapping. */ #define HVM_MAX_VCPUS 128 struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; /* Should firmware build ACPI tables? */ uint8_t acpi_enabled; /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ uint8_t apic_mode; /* How many CPUs does this domain have? */ uint32_t nr_vcpus; /* * MEMORY MAP provided by HVM domain builder. * Notes: * 1. page_to_phys(x) = x << 12 * 2. If a field is zero, the corresponding range does not exist. */ /* * 0x0 to page_to_phys(low_mem_pgend)-1: * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) */ uint32_t low_mem_pgend; /* * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: * Reserved for special memory mappings */ uint32_t reserved_mem_pgstart; /* * 0x100000000 to page_to_phys(high_mem_pgend)-1: * RAM above 4GB */ uint32_t high_mem_pgend; /* Bitmap of which CPUs are online at boot time. */ uint8_t vcpu_online[HVM_MAX_VCPUS/8]; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/hvm_op.h000066400000000000000000000112771314037446600266020ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ #include "../xen.h" /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). 
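 * Usage sketch for the get/set-param interface above (illustrative only;
 * it assumes the usual HYPERVISOR_hvm_op(cmd, arg) hypercall wrapper and
 * parameter indices from hvm/params.h, and 'xs_evtchn' is a placeholder):
 *
 *     struct xen_hvm_param p;
 *     p.domid = DOMID_SELF;
 *     p.index = HVM_PARAM_STORE_EVTCHN;
 *     if (HYPERVISOR_hvm_op(HVMOP_get_param, &p) == 0)
 *         xs_evtchn = p.value;
 *
 * Setting a parameter works the same way with HVMOP_set_param; for
 * HVM_PARAM_CALLBACK_IRQ the value uses the bit layout documented in
 * hvm/params.h, e.g. a PCI INTx callback on bus 0, devfn 0x18, INTA is
 * (1ULL << 56) | (0x18 << 8) | 0.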
*/ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/ioreq.h000066400000000000000000000100451314037446600264210ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t data; /* data (or paddr of data) */ uint32_t count; /* for rep prefixes */ uint32_t size; /* size in bytes */ uint32_t vp_eport; /* evtchn for notifications to/from device model */ uint16_t _pad0; uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t _pad1:1; uint8_t type; /* I/O type */ }; typedef struct ioreq ioreq_t; struct shared_iopage { struct ioreq vcpu_ioreq[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. 
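 * The arithmetic works out exactly: IOREQ_BUFFER_SLOT_NUM (511) slots of
 * 8 bytes plus the two 4-byte pointers come to 4096 bytes, so a consumer
 * may assert this at build time, e.g. with Linux's BUILD_BUG_ON
 * (illustrative; any static assertion mechanism works):
 *
 *     BUILD_BUG_ON(sizeof(struct buffered_iopage) > PAGE_SIZE);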
*/ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/params.h000066400000000000000000000076341314037446600265770ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. 
* no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/hvm/save.h000066400000000000000000000063451314037446600262500ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." 
#endif /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/000077500000000000000000000000001314037446600247465ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/blkif.h000066400000000000000000000133341314037446600262120ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). 
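 * A minimal front-end enqueue sequence built on these rules looks roughly
 * like this (sketch only; 'ring' is the blkif_front_ring_t generated
 * below, 'irq' is the bound event-channel IRQ, and notify_remote_via_irq()
 * is the usual Linux helper):
 *
 *     struct blkif_request *req =
 *         RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *     ... fill in the request ...
 *     ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS(&ring);
 *     notify_remote_via_irq(irq);
 *
 * Production front ends use the RING_PUSH_REQUESTS_AND_CHECK_NOTIFY()
 * variant from ring.h so the notification is skipped whenever the backend
 * has not asked for one via req_event.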
*/ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* * NB. first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "sector-size" node in the backend * xenbus info. Also the xenbus "sectors" node is expressed in 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. 
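 * Example of the unit conventions above (sketch only; 'gref', 'vdev' and
 * 'req_id' are placeholders): reading one whole 4096-byte page starting at
 * byte offset 1 MiB of the device means sector_number = 2048 (1 MiB / 512)
 * with a single segment covering sectors 0..7 of the granted frame:
 *
 *     struct blkif_request req;
 *     req.operation         = BLKIF_OP_READ;
 *     req.nr_segments       = 1;
 *     req.handle            = vdev;
 *     req.id                = req_id;
 *     req.sector_number     = 2048;
 *     req.seg[0].gref       = gref;
 *     req.seg[0].first_sect = 0;
 *     req.seg[0].last_sect  = 7;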
*/ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/cdromif.h000066400000000000000000000067321314037446600265520ustar00rootroot00000000000000/****************************************************************************** * cdromif.h * * Shared definitions between backend driver and Xen guest Virtual CDROM * block device. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_CDROMIF_H__ #define __XEN_PUBLIC_IO_CDROMIF_H__ /* * Queries backend for CDROM support */ #define XEN_TYPE_CDROM_SUPPORT _IO('c', 1) struct xen_cdrom_support { uint32_t type; int8_t ret; /* returned, 0 succeded, -1 error */ int8_t err; /* returned, backend errno */ int8_t supported; /* returned, 1 supported */ }; /* * Opens backend device, returns drive geometry or * any encountered errors */ #define XEN_TYPE_CDROM_OPEN _IO('c', 2) struct xen_cdrom_open { uint32_t type; int8_t ret; int8_t err; int8_t pad; int8_t media_present; /* returned */ uint32_t sectors; /* returned */ uint32_t sector_size; /* returned */ int32_t payload_offset; /* offset to backend node name payload */ }; /* * Queries backend for media changed status */ #define XEN_TYPE_CDROM_MEDIA_CHANGED _IO('c', 3) struct xen_cdrom_media_changed { uint32_t type; int8_t ret; int8_t err; int8_t media_changed; /* returned */ }; /* * Sends vcd generic CDROM packet to backend, followed * immediately by the vcd_generic_command payload */ #define XEN_TYPE_CDROM_PACKET _IO('c', 4) struct xen_cdrom_packet { uint32_t type; int8_t ret; int8_t err; int8_t pad[2]; int32_t payload_offset; /* offset to vcd_generic_command payload */ }; /* CDROM_PACKET_COMMAND, payload for XEN_TYPE_CDROM_PACKET */ struct vcd_generic_command { uint8_t cmd[CDROM_PACKET_SIZE]; uint8_t pad[4]; uint32_t buffer_offset; uint32_t buflen; int32_t stat; uint32_t sense_offset; uint8_t data_direction; uint8_t pad1[3]; int32_t quiet; int32_t timeout; }; union xen_block_packet { uint32_t type; struct xen_cdrom_support xcs; struct xen_cdrom_open xco; struct xen_cdrom_media_changed xcmc; struct xen_cdrom_packet xcp; }; #define PACKET_PAYLOAD_OFFSET (sizeof(struct xen_cdrom_packet)) #define PACKET_SENSE_OFFSET (PACKET_PAYLOAD_OFFSET + sizeof(struct vcd_generic_command)) #define PACKET_BUFFER_OFFSET (PACKET_SENSE_OFFSET + sizeof(struct request_sense)) #define MAX_PACKET_DATA 
(PAGE_SIZE - sizeof(struct xen_cdrom_packet) - \ sizeof(struct vcd_generic_command) - sizeof(struct request_sense)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/console.h000066400000000000000000000031271314037446600265640ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/fbif.h000066400000000000000000000126721314037446600260350ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. 
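 * As an aside on the xencons_interface ring defined in io/console.h above,
 * output is produced in the same masked producer/consumer style (sketch
 * only; 'intf' points at the shared console page, 'c' is the byte to send,
 * and wmb() is the usual write barrier):
 *
 *     XENCONS_RING_IDX prod = intf->out_prod;
 *     if (prod - intf->out_cons < sizeof(intf->out)) {
 *         intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = c;
 *         wmb();
 *         intf->out_prod = prod;
 *     }
 *
 * followed by an event-channel notification to the console backend.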
*/ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* offset of the framebuffer in bytes */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* * Framebuffer refresh period advice * Backend sends it to advise the frontend their preferred period of * refresh. Frontends that keep the framebuffer constantly up-to-date * just ignore it. Frontends that use the advice should immediately * refresh the framebuffer (and send an update notification event if * those have been requested), then use the update frequency to guide * their periodical refreshs. */ #define XENFB_TYPE_REFRESH_PERIOD 1 #define XENFB_NO_REFRESH 0 struct xenfb_refresh_period { uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */ uint32_t period; /* period of refresh, in ms, * XENFB_NO_REFRESH if no refresh is needed */ }; #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; struct xenfb_refresh_period refresh_period; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs * 64 bit. 256 directories give enough room for a 512 Meg * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. */ #ifndef CONFIG_PARAVIRT_XEN unsigned long pd[256]; #else /* Two directory pages should be enough for a while. 
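 * Working the numbers for this pd[2] case: with PAGE_SIZE == 4096 and
 * 8-byte entries, one directory page holds 4096 / 8 = 512 entries, each
 * naming one 4 KiB framebuffer page, i.e. 2 MiB per directory page and
 * 4 MiB for the two pages reserved here, comfortably above the 800x600x32
 * default (about 1.8 MiB) declared at the end of this header.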
*/ unsigned long pd[2]; #endif }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/fsif.h000066400000000000000000000123761314037446600260570ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, . */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_write_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_stat_request { uint32_t fd; }; /* This structure is a copy of some fields from stat structure, returned * via the ring. 
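 * As a usage sketch for the read path above (illustrative only; 'fd',
 * 'gref' and 'nbytes' are placeholders, and at most FSIF_NR_READ_GNTS
 * grant references, computed near the end of this header, fit in one
 * fixed-size ring slot):
 *
 *     struct fsif_read_request rd;
 *     rd.fd       = fd;
 *     rd.len      = nbytes;
 *     rd.offset   = 0;
 *     rd.grefs[0] = gref;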
*/ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t stat_ret; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; union { uint64_t ret_val; struct fsif_stat_response fstat; } u; }; typedef struct fsif_response fsif_response_t; #define FSIF_RING_ENTRY_SIZE 64 #define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ sizeof(grant_ref_t) + 1) #define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ sizeof(grant_ref_t) + 1) DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #define STATE_CLOSING "closing" #define STATE_CLOSED "closed" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/kbdif.h000066400000000000000000000074211314037446600262020ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/netif.h000066400000000000000000000171341314037446600262320ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. 
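 * A note on the keyboard/mouse interface defined just above: the frontend
 * drains the in ring roughly as follows (sketch only; 'page' is the shared
 * struct xenkbd_page and rmb()/mb() are the usual barriers):
 *
 *     uint32_t cons, prod = page->in_prod;
 *     rmb();
 *     for (cons = page->in_cons; cons != prod; cons++) {
 *         union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, cons);
 *         ... dispatch on ev->type (XENKBD_TYPE_MOTION, _KEY, _POS) ...
 *     }
 *     mb();
 *     page->in_cons = cons;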
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. 
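 * A consumer can make that size requirement explicit at build time, for
 * example with Linux's BUILD_BUG_ON (illustrative; any static assertion
 * mechanism works):
 *
 *     BUILD_BUG_ON(sizeof(struct netif_extra_info) >
 *                  sizeof(struct netif_tx_request));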
*/ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #else #define xen_netif_tx_request netif_tx_request #define xen_netif_rx_request netif_rx_request #define xen_netif_tx_response netif_tx_response #define xen_netif_rx_response netif_rx_response DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); #define xen_netif_extra_info netif_extra_info #endif #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
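 * Putting the wire format above together for a GSO packet (sketch only;
 * 'tx_ring' is a netif_tx front ring, 'prod' its req_prod_pvt, and gref0,
 * len0 and mss are placeholders; the remaining data slots are omitted):
 * the first slot carries NETTXF_extra_info and the next slot is then
 * reinterpreted as a struct netif_extra_info; that slot is exactly the
 * kind of auxiliary request which later receives NETIF_RSP_NULL.
 *
 *     struct netif_tx_request *first = RING_GET_REQUEST(&tx_ring, prod++);
 *     first->gref   = gref0;
 *     first->offset = 0;
 *     first->size   = len0;
 *     first->flags  = NETTXF_extra_info | NETTXF_more_data;
 *     struct netif_extra_info *gso =
 *         (struct netif_extra_info *)RING_GET_REQUEST(&tx_ring, prod++);
 *     gso->type           = XEN_NETIF_EXTRA_TYPE_GSO;
 *     gso->flags          = 0;
 *     gso->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
 *     gso->u.gso.size     = mss;
 *     gso->u.gso.pad      = 0;
 *     gso->u.gso.features = 0;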
*/ #define NETIF_RSP_NULL 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/pciif.h000066400000000000000000000074321314037446600262170ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) #define _XEN_PCIB_AERHANDLER (1) #define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) #define _XEN_PCIB_active (2) #define XEN_PCIB_active (1<<_XEN_PCIB_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) #define XEN_PCI_OP_enable_msi (2) #define XEN_PCI_OP_disable_msi (3) #define XEN_PCI_OP_enable_msix (4) #define XEN_PCI_OP_disable_msix (5) #define XEN_PCI_OP_aer_detected (6) #define XEN_PCI_OP_aer_resume (7) #define XEN_PCI_OP_aer_mmio (8) #define XEN_PCI_OP_aer_slotreset (9) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) /* * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) * Should not exceed 128 */ #define SH_INFO_MAX_VEC 128 struct xen_msix_entry { uint16_t vector; uint16_t entry; }; struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; /* IN: Contains extra infor for this operation */ uint32_t info; /*IN: param for msi-x */ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; }; /*used for pcie aer handling*/ struct xen_pcie_aer_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /*IN/OUT: return aer_op result or carry error_detected state as input*/ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment*/ uint32_t bus; uint32_t devfn; }; struct xen_pci_sharedinfo { /* 
flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; struct xen_pcie_aer_op aer_op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/protocols.h000066400000000000000000000032071314037446600271450ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/ring.h000066400000000000000000000352331314037446600260640ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. 
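 * Returning briefly to the PCI front/back interface above: reading the
 * 16-bit vendor ID at config offset 0 is expressed through struct
 * xen_pci_op roughly as below (sketch only; 'devfn' is a placeholder).
 * In practice the frontend copies the op into the shared
 * xen_pci_sharedinfo page, sets XEN_PCIF_active, notifies the backend,
 * and reads the result back once the flag has been cleared.
 *
 *     struct xen_pci_op op = { 0 };
 *     op.cmd    = XEN_PCI_OP_conf_read;
 *     op.domain = 0;
 *     op.bus    = 0;
 *     op.devfn  = devfn;
 *     op.offset = 0;
 *     op.size   = 2;
 *
 * On completion op.err holds a XEN_PCI_ERR_* code and op.value the data.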
*/ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include "../xen-compat.h" #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) /* * The same for passing in an actual pointer instead of a name tag. */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t netfront_smartpoll_active; \ uint8_t pad[47]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. 
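 *
 * Purely as an illustration (not part of the original macros), a front
 * end built with DEFINE_RING_TYPES(mytag, request_t, response_t) might
 * queue one request roughly as follows; front_ring is the
 * mytag_front_ring_t initialised as above, and notify_remote_via_irq()
 * with irq is an assumed event-channel kick:
 *
 *   request_t *req;
 *   int notify;
 *
 *   if (!RING_FULL(&front_ring)) {
 *       req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *       (fill in *req here)
 *       front_ring.req_prod_pvt++;
 *       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *       if (notify)
 *           notify_remote_via_irq(irq);
 *   }
 *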
* * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. 
For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. */ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/tpmif.h000066400000000000000000000046251314037446600262450ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/usbif.h000066400000000000000000000105601314037446600262310ustar00rootroot00000000000000/* * usbif.h * * USB I/O interface for Xen guest OSes. * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ #define __XEN_PUBLIC_IO_USBIF_H__ #include "ring.h" #include "../grant_table.h" enum usb_spec_version { USB_VER_UNKNOWN = 0, USB_VER_USB11, USB_VER_USB20, USB_VER_USB30, /* not supported yet */ }; /* * USB pipe in usbif_request * * bits 0-5 are specific bits for virtual USB driver. * bits 7-31 are standard urb pipe. 
* * - port number(NEW): bits 0-4 * (USB_MAXCHILDREN is 31) * * - operation flag(NEW): bit 5 * (0 = submit urb, * 1 = unlink urb) * * - direction: bit 7 * (0 = Host-to-Device [Out] * 1 = Device-to-Host [In]) * * - device address: bits 8-14 * * - endpoint: bits 15-18 * * - pipe type: bits 30-31 * (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) */ #define usbif_pipeportnum(pipe) ((pipe) & 0x1f) #define usbif_setportnum_pipe(pipe, portnum) \ ((pipe)|(portnum)) #define usbif_pipeunlink(pipe) ((pipe) & 0x20) #define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) #define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) #define USBIF_BACK_MAX_PENDING_REQS (128) #define USBIF_MAX_SEGMENTS_PER_REQUEST (16) /* * RING for transferring urbs. */ struct usbif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; struct usbif_urb_request { uint16_t id; /* request id */ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ /* basic urb parameter */ uint32_t pipe; uint16_t transfer_flags; uint16_t buffer_length; union { uint8_t ctrl[8]; /* setup_packet (Ctrl) */ struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t start_frame; /* start frame */ uint16_t number_of_packets; /* number of ISO packet */ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ } isoc; struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t pad[3]; } intr; struct { uint16_t unlink_id; /* unlink request id */ uint16_t pad[3]; } unlink; } u; /* urb data segments */ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_urb_request usbif_urb_request_t; struct usbif_urb_response { uint16_t id; /* request id */ uint16_t start_frame; /* start frame (ISO) */ int32_t status; /* status (non-ISO) */ int32_t actual_length; /* actual transfer length */ int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_urb_response usbif_urb_response_t; DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response); #define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE) /* * RING for notifying connect/disconnect events to frontend */ struct usbif_conn_request { uint16_t id; }; typedef struct usbif_conn_request usbif_conn_request_t; struct usbif_conn_response { uint16_t id; /* request id */ uint8_t portnum; /* port number */ uint8_t speed; /* usb_device_speed */ }; typedef struct usbif_conn_response usbif_conn_response_t; DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response); #define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE) #endif /* __XEN_PUBLIC_IO_USBIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/vscsiif.h000066400000000000000000000070421314037446600265700ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. */ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include "ring.h" #include "../grant_table.h" /* command between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_BACK_MAX_PENDING_REQS 128 /* * Maximum scatter/gather segments per request. * * Considering balance between allocating al least 16 "vscsiif_request" * structures on one page (4096bytes) and number of scatter gather * needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* * base on linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; } seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; }; typedef struct vscsiif_request vscsiif_request_t; struct vscsiif_response { uint16_t rqid; uint8_t padding; uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/xenbus.h000066400000000000000000000044351314037446600264310ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/io/xs_wire.h000066400000000000000000000066111314037446600266030ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. 
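 *
 * Purely as an illustration (not part of the wire format definition): a
 * failed request is answered with an XS_ERROR message whose payload is
 * the errno name as a nul-terminated string, e.g. "ENOENT".  A client
 * could translate it back with a sketch like the following, using the
 * xsd_errors[] table defined below (assumes <string.h> for strcmp):
 *
 *   int xs_error_to_errno(const char *str)
 *   {
 *       unsigned int i;
 *       for (i = 0; i < sizeof(xsd_errors) / sizeof(xsd_errors[0]); i++)
 *           if (strcmp(str, xsd_errors[i].errstring) == 0)
 *               return xsd_errors[i].errnum;
 *       return EINVAL;
 *   }
 *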
*/ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/kexec.h000066400000000000000000000144501314037446600256130ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. 
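 *
 * As a purely illustrative sketch of the "range information" operation
 * described below (not part of this header), dom0 code could look up
 * the crash area roughly like this, assuming the kernel provides a
 * HYPERVISOR_kexec_op() hypercall wrapper:
 *
 *   xen_kexec_range_t range = { .range = KEXEC_RANGE_MA_CRASH };
 *
 *   if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range) == 0)
 *       printk("crash area at %#lx, %lu bytes\n", range.start, range.size);
 *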
* * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. */ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. 
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ #define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap * Note that although this is adjacent * to Xen it exists in a separate EFI * region on ia64, and thus needs to be * inserted into iomem_machine separately */ #define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of * the ia64_boot_param */ #define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of * of the EFI Memory Map */ #define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... [in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/mem_event.h000066400000000000000000000034371314037446600264760ustar00rootroot00000000000000/****************************************************************************** * mem_event.h * * Memory event common structures. * * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _XEN_PUBLIC_MEM_EVENT_H #define _XEN_PUBLIC_MEM_EVENT_H #include "xen.h" #include "io/ring.h" /* Memory event notification modes */ #define MEM_EVENT_MODE_ASYNC 0 #define MEM_EVENT_MODE_SYNC (1 << 0) #define MEM_EVENT_MODE_SYNC_ALL (1 << 1) /* Memory event flags */ #define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) #define MEM_EVENT_FLAG_DOM_PAUSED (1 << 1) #define MEM_EVENT_FLAG_OUT_OF_MEM (1 << 2) typedef struct mem_event_shared_page { int port; } mem_event_shared_page_t; typedef struct mem_event_st { unsigned long gfn; unsigned long offset; unsigned long p2mt; int vcpu_id; uint64_t flags; } mem_event_request_t, mem_event_response_t; DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/memory.h000066400000000000000000000236111314037446600260230ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include "xen.h" /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. 
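 *
 * Purely as an illustration (not part of the interface): a caller that
 * wants extents restricted to 32-bit addresses and taken from NUMA
 * node 0 could combine the flags in xen_memory_reservation.mem_flags as
 *
 *   reservation.mem_flags = XENMEMF_address_bits(32) | XENMEMF_node(0);
 *
 * where "reservation" is an assumed struct xen_memory_reservation.
 *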
*/ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. */ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. 
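 *
 * Purely as an illustration (not part of the interface), assuming the
 * kernel's HYPERVISOR_memory_op() wrapper and a caller-provided
 * xen_pfn_t mfn_buf[MAX_EXTENTS] array (both hypothetical names here):
 *
 *   struct xen_machphys_mfn_list list = { .max_extents = MAX_EXTENTS };
 *
 *   set_xen_guest_handle(list.extent_start, mfn_buf);
 *   if (HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &list) == 0)
 *       (the first list.nr_extents entries of mfn_buf are now valid)
 *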
*/ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; #define XENMAPIDX_grant_table_status 0x80000000 /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. 
*/ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; /* * Get the number of MFNs saved through memory sharing. * The call never fails. */ #define XENMEM_get_sharing_freed_pages 18 #endif /* __XEN_PUBLIC_MEMORY_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/nmi.h000066400000000000000000000053061314037446600252770ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ #include "xen.h" /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. 
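 *
 * Purely as an illustration (not part of the interface), assuming the
 * kernel exposes a HYPERVISOR_nmi_op() hypercall wrapper, dom0 vcpu0
 * could register and later remove a handler roughly as follows, where
 * nmi_entry is an assumed assembly entry point:
 *
 *   struct xennmi_callback cb = {
 *       .handler_address = (unsigned long)nmi_entry,
 *   };
 *
 *   HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
 *   ...
 *   HYPERVISOR_nmi_op(XENNMI_unregister_callback, NULL);
 *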
*/ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/physdev.h000066400000000000000000000173201314037446600261750ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ #include "xen.h" /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. 
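 *
 * Purely as an illustration (not part of the interface), assuming the
 * kernel's HYPERVISOR_physdev_op() wrapper and a caller-owned
 * uint8_t io_bitmap[65536 / 8] covering all ports (hypothetical names):
 *
 *   struct physdev_set_iobitmap set_iobitmap = { .nr_ports = 65536 };
 *
 *   set_xen_guest_handle(set_iobitmap.bitmap, io_bitmap);
 *   HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
 *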
*/ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); #define PHYSDEVOP_setup_gsi 21 struct physdev_setup_gsi { int gsi; /* IN */ uint8_t triggering; /* IN */ uint8_t polarity; /* IN */ }; typedef struct physdev_setup_gsi physdev_setup_gsi_t; DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/platform.h000066400000000000000000000325711314037446600263440ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). 
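 *
 * Purely as an illustration (not part of the interface), assuming the
 * kernel's HYPERVISOR_platform_op() wrapper, dom0 could read back MTRR
 * register 0 roughly as follows:
 *
 *   struct xen_platform_op op = {
 *       .cmd = XENPF_read_memtype,
 *       .interface_version = XENPF_INTERFACE_VERSION,
 *       .u.read_memtype.reg = 0,
 *   };
 *
 *   if (HYPERVISOR_platform_op(&op) == 0)
 *       (op.u.read_memtype.mfn, .nr_mfns and .type describe the range)
 *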
*/ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. */ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. 
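 *
 * Purely as an illustration (not part of the interface), assuming the
 * kernel's HYPERVISOR_platform_op() wrapper plus caller-owned
 * uint8_t cpumap[] and uint64_t idle[] buffers sized for nr_cpus
 * (all hypothetical names):
 *
 *   struct xen_platform_op op = {
 *       .cmd = XENPF_getidletime,
 *       .interface_version = XENPF_INTERFACE_VERSION,
 *       .u.getidletime.cpumap_nr_cpus = nr_cpus,
 *   };
 *
 *   set_xen_guest_handle(op.u.getidletime.cpumap_bitmap, cpumap);
 *   set_xen_guest_handle(op.u.getidletime.idletime, idle);
 *   HYPERVISOR_platform_op(&op);
 *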
*/ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. */ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct 
xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ } u; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); #define XENPF_get_cpuinfo 55 struct xenpf_pcpuinfo { /* IN */ uint32_t xen_cpuid; /* OUT */ /* The maxium cpu_id that is present */ uint32_t max_present; #define XEN_PCPU_FLAGS_ONLINE 1 /* Correponding xen_cpuid is not present*/ #define XEN_PCPU_FLAGS_INVALID 2 uint32_t flags; uint32_t apic_id; uint32_t acpi_id; }; typedef struct xenpf_pcpuinfo xenpf_pcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_pcpuinfo_t); #define XENPF_cpu_online 56 #define XENPF_cpu_offline 57 struct xenpf_cpu_ol { uint32_t cpuid; }; typedef struct xenpf_cpu_ol xenpf_cpu_ol_t; DEFINE_XEN_GUEST_HANDLE(xenpf_cpu_ol_t); #define XENPF_cpu_hotadd 58 struct xenpf_cpu_hotadd { uint32_t apic_id; uint32_t acpi_id; uint32_t pxm; }; #define XENPF_mem_hotadd 59 struct xenpf_mem_hotadd { uint64_t spfn; uint64_t epfn; uint32_t pxm; uint32_t flags; }; #define XENPF_get_cpu_freq ('N' << 24) struct xenpf_get_cpu_freq { /* IN variables */ uint32_t vcpu; /* OUT variables */ uint32_t freq; /* in kHz */ }; struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; struct xenpf_pcpuinfo pcpu_info; struct xenpf_cpu_ol cpu_ol; struct xenpf_cpu_hotadd cpu_add; struct xenpf_mem_hotadd mem_add; struct xenpf_get_cpu_freq get_cpu_freq; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/sched.h000066400000000000000000000103501314037446600255750ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; DEFINE_GUEST_HANDLE_STRUCT(sched_poll); typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/sysctl.h000066400000000000000000000435651314037446600260460ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000007 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. */ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. */ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 /* (x86) The platform supports HVM guests. */ #define _XEN_SYSCTL_PHYSCAP_hvm 0 #define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) /* (x86) The platform supports HVM-guest direct access to I/O devices. */ #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 #define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; uint32_t max_node_id; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; /* * IN: maximum addressable entry in the caller-provided cpu_to_node array. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the cpu_to_node array is truncated! 
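 * (Illustrative: a caller that allocates N entries for cpu_to_node passes
 * max_cpu_id = N - 1 on input; on return only the first min(IN, OUT) + 1
 * entries are meaningful.)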
*/ uint32_t max_cpu_id; /* * If not NULL, this array is filled with node identifier for each cpu. * If a cpu has no node information (e.g., cpu not present) then the * sentinel value ~0u is written. * The size of this array is specified by the caller in @max_cpu_id. * If the actual @max_cpu_id is smaller than the array then the trailing * elements of the array will not be written by the sysctl. */ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; /* XEN_SYSCTL_PHYSCAP_??? */ uint32_t capabilities; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. */ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ #define XEN_SYSCTL_getcpuinfo 8 struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); #define XEN_SYSCTL_availheap 9 struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); #define XEN_SYSCTL_get_pmstat 10 struct pm_px_val { uint64_aligned_t freq; /* Px core frequency */ uint64_aligned_t residency; /* Px residency time */ uint64_aligned_t count; /* Px transition count */ }; typedef struct pm_px_val pm_px_val_t; DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); struct pm_px_stat { uint8_t total; /* total Px states */ uint8_t usable; /* usable Px states */ uint8_t last; /* last Px state */ uint8_t cur; /* current Px state */ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; }; typedef struct pm_px_stat pm_px_stat_t; DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); struct pm_cx_stat { uint32_t nr; /* entry nr in triggers & residencies, including C0 */ uint32_t last; /* last Cx state */ uint64_aligned_t idle_time; /* idle time from boot */ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ }; struct xen_sysctl_get_pmstat { #define PMSTAT_CATEGORY_MASK 0xf0 #define PMSTAT_PX 0x10 #define PMSTAT_CX 0x20 #define PMSTAT_get_max_px (PMSTAT_PX | 0x1) #define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) #define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) #define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) #define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) #define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) uint32_t type; uint32_t cpuid; union { struct pm_px_stat getpx; struct pm_cx_stat getcx; /* other struct for tx, etc */ } u; }; typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); /* * Status codes. Must be greater than 0 to avoid confusing * sysctl callers that see 0 as a plain successful return. */ #define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1 #define XEN_CPU_HOTPLUG_STATUS_ONLINE 2 #define XEN_CPU_HOTPLUG_STATUS_NEW 3 #define XEN_SYSCTL_cpu_hotplug 11 struct xen_sysctl_cpu_hotplug { /* IN variables */ uint32_t cpu; /* Physical cpu. */ #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 #define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2 uint32_t op; /* hotplug opcode */ }; typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); /* * Get/set xen power management, include * 1. 
cpufreq governors and related parameters */ #define XEN_SYSCTL_pm_op 12 struct xen_userspace { uint32_t scaling_setspeed; }; typedef struct xen_userspace xen_userspace_t; struct xen_ondemand { uint32_t sampling_rate_max; uint32_t sampling_rate_min; uint32_t sampling_rate; uint32_t up_threshold; }; typedef struct xen_ondemand xen_ondemand_t; /* * cpufreq para name of this structure named * same as sysfs file name of native linux */ #define CPUFREQ_NAME_LEN 16 struct xen_get_cpufreq_para { /* IN/OUT variable */ uint32_t cpu_num; uint32_t freq_num; uint32_t gov_num; /* for all governors */ /* OUT variable */ XEN_GUEST_HANDLE_64(uint32) affected_cpus; XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; XEN_GUEST_HANDLE_64(char) scaling_available_governors; char scaling_driver[CPUFREQ_NAME_LEN]; uint32_t cpuinfo_cur_freq; uint32_t cpuinfo_max_freq; uint32_t cpuinfo_min_freq; uint32_t scaling_cur_freq; char scaling_governor[CPUFREQ_NAME_LEN]; uint32_t scaling_max_freq; uint32_t scaling_min_freq; /* for specific governor */ union { struct xen_userspace userspace; struct xen_ondemand ondemand; } u; }; struct xen_set_cpufreq_gov { char scaling_governor[CPUFREQ_NAME_LEN]; }; struct xen_set_cpufreq_para { #define SCALING_MAX_FREQ 1 #define SCALING_MIN_FREQ 2 #define SCALING_SETSPEED 3 #define SAMPLING_RATE 4 #define UP_THRESHOLD 5 uint32_t ctrl_type; uint32_t ctrl_value; }; /* Get physical CPU topology information. */ #define INVALID_TOPOLOGY_ID (~0U) struct xen_get_cputopo { /* IN: maximum addressable entry in * the caller-provided cpu_to_core/socket. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(uint32) cpu_to_core; XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; /* OUT: number of cpus returned * If OUT is greater than IN then the cpu_to_core/socket is truncated! */ uint32_t nr_cpus; }; struct xen_sysctl_pm_op { #define PM_PARA_CATEGORY_MASK 0xf0 #define CPUFREQ_PARA 0x10 /* cpufreq command type */ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04) /* get CPU topology */ #define XEN_SYSCTL_pm_op_get_cputopo 0x20 /* set/reset scheduler power saving option */ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21 /* cpuidle max_cstate access command */ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22 #define XEN_SYSCTL_pm_op_set_max_cstate 0x23 /* set scheduler migration cost value */ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24 #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25 uint32_t cmd; uint32_t cpuid; union { struct xen_get_cpufreq_para get_para; struct xen_set_cpufreq_gov set_gov; struct xen_set_cpufreq_para set_para; uint64_aligned_t get_avgfreq; struct xen_get_cputopo get_topo; uint32_t set_sched_opt_smt; uint32_t get_max_cstate; uint32_t set_max_cstate; uint32_t get_vcpu_migration_delay; uint32_t set_vcpu_migration_delay; } u; }; #define XEN_SYSCTL_page_offline_op 14 struct xen_sysctl_page_offline_op { /* IN: range of page to be offlined */ #define sysctl_page_offline 1 #define sysctl_page_online 2 #define sysctl_query_page_offline 3 uint32_t cmd; uint32_t start; uint32_t end; /* OUT: result of page offline request */ /* * bit 0~15: result flags * bit 16~31: owner */ XEN_GUEST_HANDLE(uint32) status; }; #define PG_OFFLINE_STATUS_MASK (0xFFUL) /* The result is invalid, i.e. 
HV does not handle it */ #define PG_OFFLINE_INVALID (0x1UL << 0) #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) #define PG_OFFLINE_FAILED (0x1UL << 3) #define PG_ONLINE_FAILED PG_OFFLINE_FAILED #define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED #define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) #define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) #define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) #define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) #define PG_OFFLINE_MISC_MASK (0xFFUL << 4) /* only valid when PG_OFFLINE_FAILED */ #define PG_OFFLINE_XENPAGE (0x1UL << 8) #define PG_OFFLINE_DOM0PAGE (0x1UL << 9) #define PG_OFFLINE_ANONYMOUS (0x1UL << 10) #define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) #define PG_OFFLINE_OWNED (0x1UL << 12) #define PG_OFFLINE_BROKEN (0x1UL << 13) #define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN #define PG_OFFLINE_OWNER_SHIFT 16 #define XEN_SYSCTL_lockprof_op 15 /* Sub-operations: */ #define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */ #define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */ /* Record-type: */ #define LOCKPROF_TYPE_GLOBAL 0 /* global lock, idx meaningless */ #define LOCKPROF_TYPE_PERDOM 1 /* per-domain lock, idx is domid */ #define LOCKPROF_TYPE_N 2 /* number of types */ struct xen_sysctl_lockprof_data { char name[40]; /* lock name (may include up to 2 %d specifiers) */ int32_t type; /* LOCKPROF_TYPE_??? */ int32_t idx; /* index (e.g. domain id) */ uint64_aligned_t lock_cnt; /* # of locking succeeded */ uint64_aligned_t block_cnt; /* # of wait for lock */ uint64_aligned_t lock_time; /* nsecs lock held */ uint64_aligned_t block_time; /* nsecs waited for lock */ }; typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t); struct xen_sysctl_lockprof_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_LOCKPROF_??? */ uint32_t max_elem; /* size of output buffer */ /* OUT variables (query only). */ uint32_t nr_elem; /* number of elements available */ uint64_aligned_t time; /* nsecs of profile measurement */ /* profile information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data; }; typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t); struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; struct xen_sysctl_get_pmstat get_pmstat; struct xen_sysctl_cpu_hotplug cpu_hotplug; struct xen_sysctl_pm_op pm_op; struct xen_sysctl_page_offline_op page_offline; struct xen_sysctl_lockprof_op lockprof_op; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/tmem.h000066400000000000000000000110241314037446600254500ustar00rootroot00000000000000/****************************************************************************** * tmem.h * * Guest OS interface to Xen Transcendent Memory. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_TMEM_H__ #define __XEN_PUBLIC_TMEM_H__ #include "xen.h" /* Commands to HYPERVISOR_tmem_op() */ #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_NEW_PAGE 3 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 /* Privileged commands to HYPERVISOR_tmem_op() */ #define TMEM_AUTH 101 #define TMEM_RESTORE_NEW 102 /* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ #define TMEMC_THAW 0 #define TMEMC_FREEZE 1 #define TMEMC_FLUSH 2 #define TMEMC_DESTROY 3 #define TMEMC_LIST 4 #define TMEMC_SET_WEIGHT 5 #define TMEMC_SET_CAP 6 #define TMEMC_SET_COMPRESS 7 #define TMEMC_QUERY_FREEABLE_MB 8 #define TMEMC_SAVE_BEGIN 10 #define TMEMC_SAVE_GET_VERSION 11 #define TMEMC_SAVE_GET_MAXPOOLS 12 #define TMEMC_SAVE_GET_CLIENT_WEIGHT 13 #define TMEMC_SAVE_GET_CLIENT_CAP 14 #define TMEMC_SAVE_GET_CLIENT_FLAGS 15 #define TMEMC_SAVE_GET_POOL_FLAGS 16 #define TMEMC_SAVE_GET_POOL_NPAGES 17 #define TMEMC_SAVE_GET_POOL_UUID 18 #define TMEMC_SAVE_GET_NEXT_PAGE 19 #define TMEMC_SAVE_GET_NEXT_INV 20 #define TMEMC_SAVE_END 21 #define TMEMC_RESTORE_BEGIN 30 #define TMEMC_RESTORE_PUT_PAGE 32 #define TMEMC_RESTORE_FLUSH_PAGE 33 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_POOL_PAGESIZE_MASK 0xf #define TMEM_POOL_VERSION_SHIFT 24 #define TMEM_POOL_VERSION_MASK 0xff /* Bits for client flags (save/restore) */ #define TMEM_CLIENT_COMPRESS 1 #define TMEM_CLIENT_FROZEN 2 /* Special errno values */ #define EFROZEN 1000 #define EEMPTY 1001 #ifndef __ASSEMBLY__ typedef xen_pfn_t tmem_cli_mfn_t; typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; struct tmem_op { uint32_t cmd; int32_t pool_id; union { struct { uint64_t uuid[2]; uint32_t flags; uint32_t arg1; } new; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */ struct { uint32_t subop; uint32_t cli_id; uint32_t arg1; uint32_t arg2; uint64_t arg3; tmem_cli_va_t buf; } ctrl; /* for cmd == TMEM_CONTROL */ struct { uint64_t object; uint32_t index; uint32_t tmem_offset; uint32_t pfn_offset; uint32_t len; tmem_cli_mfn_t cmfn; /* client machine page frame */ } gen; /* for all other cmd ("generic") */ } u; }; typedef struct tmem_op tmem_op_t; DEFINE_XEN_GUEST_HANDLE(tmem_op_t); struct tmem_handle { uint32_t pool_id; uint32_t index; uint64_t 
oid; }; #endif #endif /* __XEN_PUBLIC_TMEM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/trace.h000066400000000000000000000226251314037446600256150ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ #define TRC_PM 0x0080f000 /* Xen power management trace */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) #define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) #define TRC_TRACE_IRQ (TRC_GEN + 4) #define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1) #define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2) #define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2) #define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3) #define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4) #define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5) #define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7) #define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9) #define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10) #define 
TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) #define TRC_PV_PAGE_FAULT (TRC_PV + 4) #define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) #define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) #define TRC_PV_EMULATE_4GB (TRC_PV + 7) #define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) #define TRC_PV_PAGING_FIXUP (TRC_PV + 9) #define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) #define TRC_PV_PTWR_EMULATION (TRC_PV + 11) #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) /* Indicates that addresses in trace record are 64 bits */ #define TRC_64_FLAG (0x100) #define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1) #define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2) #define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3) #define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4) #define TRC_SHADOW_MMIO (TRC_SHADOW + 5) #define TRC_SHADOW_FIXUP (TRC_SHADOW + 6) #define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7) #define TRC_SHADOW_EMULATE (TRC_SHADOW + 8) #define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9) #define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10) #define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11) #define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12) #define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13) #define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14) #define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) #define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14) #define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) #define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16) #define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17) #define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) #define TRC_HVM_INTR_WINDOW 
(TRC_HVM_HANDLER + 0x20) #define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) #define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) /* trace subclasses for power management */ #define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */ #define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */ /* trace events for per class */ #define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01) #define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01) #define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02) /* This structure represents a single trace buffer record. */ struct t_rec { uint32_t event:28; uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ union { struct { uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ uint32_t extra_u32[7]; /* event data items */ } cycles; struct { uint32_t extra_u32[7]; /* event data items */ } nocycles; } u; }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { /* Assume the data buffer size is X. X is generally not a power of 2. * CONS and PROD are incremented modulo (2*X): * 0 <= cons < 2*X * 0 <= prod < 2*X * This is done because addition modulo X breaks at 2^32 when X is not a * power of 2: * (((2^32 - 1) % X) + 1) % X != (2^32) % X */ uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. */ /* Records follow immediately after the meta-data header. */ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/vcpu.h000066400000000000000000000227101314037446600254670ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ #include "xen.h" /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. 
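 * (Illustrative bring-up sequence, assuming a HYPERVISOR_vcpu_op() wrapper:
 * first VCPUOP_initialise with a fully populated vcpu_guest_context, then
 * VCPUOP_up; VCPUOP_is_up can be polled to confirm the VCPU is running.)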
* * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. */ uint64_t time[4]; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info); typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. 
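 *
 * Illustrative guest-side sketch (assumes the kernel's HYPERVISOR_vcpu_op()
 * wrapper; `vcpu' is the target VCPU id) arming a 10ms periodic timer and
 * later stopping it:
 *
 *   struct vcpu_set_periodic_timer t = { .period_ns = 10 * 1000 * 1000 };
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, vcpu, &t);
 *   ...
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, vcpu, NULL);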
*/ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer); typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer); typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. */ #define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info); typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 /* * Get the physical ID information for a pinned vcpu's underlying physical * processor. The physical ID informmation is architecture-specific. * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and * greater are reserved. * This command returns -EINVAL if it is not a valid operation for this VCPU. */ #define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ struct vcpu_get_physid { uint64_t phys_id; }; typedef struct vcpu_get_physid vcpu_get_physid_t; DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); #define xen_vcpu_physid_to_x86_apicid(physid) \ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) #define xen_vcpu_physid_to_x86_acpiid(physid) \ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32))) /* * Register a memory location to get a secondary copy of the vcpu time * parameters. The master copy still exists as part of the vcpu shared * memory area, and this secondary copy is updated whenever the master copy * is updated (and using the same versioning scheme for synchronisation). * * The intent is that this copy may be mapped (RO) into userspace so * that usermode can compute system time using the time info and the * tsc. Usermode will see an array of vcpu_time_info structures, one * for each vcpu, and choose the right one by an existing mechanism * which allows it to get the current vcpu number (such as via a * segment limit). It can then apply the normal algorithm to compute * system time from the tsc. * * @extra_arg == pointer to vcpu_register_time_info_memory_area structure. 
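 *
 * Illustrative registration sketch (assumes a HYPERVISOR_vcpu_op() wrapper
 * and a guest-owned struct vcpu_time_info instance `ti'):
 *
 *   struct vcpu_register_time_memory_area area = { .addr.v = &ti };
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
 *                           vcpu, &area);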
*/ #define VCPUOP_register_vcpu_time_memory_area 13 DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t); struct vcpu_register_time_memory_area { union { XEN_GUEST_HANDLE(vcpu_time_info_t) h; struct vcpu_time_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t); #endif /* __XEN_PUBLIC_VCPU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/version.h000066400000000000000000000061561314037446600262050ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; struct xen_extraversion { xen_extraversion_t extraversion; }; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; struct xen_capabilities_info { xen_capabilities_info_t info; }; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; struct xen_changeset_info { xen_changeset_info_t info; }; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. 
*/ #define XENVER_guest_handle 8 #define XENVER_commandline 9 typedef char xen_commandline_t[1024]; #endif /* __XEN_PUBLIC_VERSION_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xen-compat.h000066400000000000000000000036361314037446600265730ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x0003020a #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xen.h000066400000000000000000000672511314037446600253150ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #ifdef CONFIG_PARAVIRT_XEN #include #endif #if defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #elif defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(uint64_t); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op_new 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 #define __HYPERVISOR_tmem_op 38 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #else #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_new #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. 
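 * Guests built against an older interface keep using the __HYPERVISOR_dom0_op
 * name; the alias below maps it onto the platform_op hypercall number. The
 * interface version itself is selected by defining __XEN_INTERFACE_VERSION__
 * before including these headers (see xen-compat.h), e.g. (illustrative):
 *
 *   #define __XEN_INTERFACE_VERSION__ 0x00030202
 *   #include <xen/interface/xen.h>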
*/ #if __XEN_INTERFACE_VERSION__ < 0x00030204 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ #define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */ #define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * HYPERVISOR_mmu_update(reqs, count, pdone, foreigndom) * * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). * @count is the length of the above array. * @pdone is an output parameter indicating number of completed operations * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this * hypercall invocation. Can be DOMID_SELF. * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced * in this hypercall invocation. The value of this field * (x) encodes the PFD as follows: * x == 0 => PFD == DOMID_SELF * x != 0 => PFD == x - 1 * * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. * ------------- * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table belonging to PFD. If updating an L1 table, * and the new table entry is valid/present, the mapped frame must belong to * FD. If attempting to map an I/O page then the caller assumes the privilege * of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. 
* * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. 
Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * DOMID_COW is used as the owner of sharable pages */ #define DOMID_COW (0x7FF3U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; DEFINE_GUEST_HANDLE_STRUCT(mmu_update); typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op; #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) unsigned long result; #else long result; #endif unsigned long args[6]; }; DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. 
It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; #ifdef CONFIG_PARAVIRT_XEN struct pvclock_vcpu_time_info time; #else struct vcpu_time_info time; #endif }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. 
Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ #ifdef CONFIG_PARAVIRT_XEN struct pvclock_wall_clock wc; #else uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ #endif struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. 
*/ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ /* * A multiboot module is a package containing modules very similar to a * multiboot module array. The only differences are: * - the array of module descriptors is by convention simply at the beginning * of the multiboot module, * - addresses in the module descriptors are based on the beginning of the * multiboot module, * - the number of modules is determined by a termination descriptor that has * mod_start == 0. * * This permits to both build it statically and reference it in a configuration * file, and let the PV guest easily rebase the addresses to virtual addresses * and at the same time count the number of modules. */ struct xen_multiboot_mod_list { /* Address of first byte of the module */ uint32_t mod_start; /* Address of last byte of the module (inclusive) */ uint32_t mod_end; /* Address of zero-terminated command line */ uint32_t cmdline; /* Unused, must be zero */ uint32_t pad; }; typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600261550ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xenoprof.h000066400000000000000000000100311314037446600263430ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. 
* Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). */ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 #define XENOPROF_last_op 15 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE ~0UL /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xsm/000077500000000000000000000000001314037446600251465ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xsm/acm.h000066400000000000000000000156251314037446600260700ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 4 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 #define ACMHOOK_conflictset 3 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. 
*/ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* size of the SHA1 hash identifying the XML policy from which the binary policy was created */ #define ACM_SHA1_HASH_SIZE 20 /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xsm/acm_ops.h000066400000000000000000000104741314037446600267460ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/interface/xsm/flask_op.h000066400000000000000000000024671314037446600271260ustar00rootroot00000000000000/* * This file contains the flask_op hypercall commands and definitions. * * Author: George Coker, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. 
*/ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 #define FLASK_ADD_OCONTEXT 21 #define FLASK_DEL_OCONTEXT 22 #define FLASK_LAST FLASK_DEL_OCONTEXT typedef struct flask_op { uint32_t cmd; uint32_t size; char *buf; } flask_op_t; DEFINE_XEN_GUEST_HANDLE(flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/page.h000066400000000000000000000000321314037446600234570ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/pcifront.h000066400000000000000000000026601314037446600244000ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ #include struct pcifront_device; struct pci_bus; #define pcifront_sd pci_sysdata static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { } #else /* __ia64__ */ #include #include #define pcifront_sd pci_controller extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { xen_pcibios_setup_root_windows(bus, sd); } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/public/000077500000000000000000000000001314037446600236555ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/public/evtchn.h000066400000000000000000000056351314037446600253260ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. 
* * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/public/gntdev.h000066400000000000000000000107401314037446600253170ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. 
* * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. 
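 *
 * A sketch of the teardown pattern described above (illustrative only;
 * assumes @fd is an open gntdev file descriptor, @addr was returned by a
 * prior mmap() of a grant mapping, 4 KiB pages, and error handling is
 * omitted):
 *
 *     struct ioctl_gntdev_get_offset_for_vaddr get;
 *     struct ioctl_gntdev_unmap_grant_ref unmap;
 *
 *     get.vaddr = (uint64_t)(unsigned long)addr;
 *     ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &get);  // recover offset and count
 *     munmap(addr, get.count * 4096);                      // munmap() before UNMAP_GRANT_REF
 *     unmap.index = get.offset;
 *     unmap.count = get.count;
 *     ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);     // then release the grant mappings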
*/ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; /* * Sets the maximum number of grants that may mapped at once by this gntdev * instance. * * N.B. This must be called before any other ioctl is performed on the device. */ #define IOCTL_GNTDEV_SET_MAX_GRANTS \ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ uint32_t count; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/public/privcmd.h000066400000000000000000000052141314037446600254740ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. 
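 *
 * Illustrative sketch only (not part of the interface definition): a
 * user-space caller would typically issue this ioctl along the following
 * lines, here querying the Xen version; __HYPERVISOR_xen_version comes
 * from xen.h, sub-command 0 is assumed to be XENVER_version from
 * version.h, and error handling is omitted:
 *
 *     privcmd_hypercall_t call = { .op  = __HYPERVISOR_xen_version,  // 17 in xen.h
 *                                  .arg = { 0 } };                   // 0 == XENVER_version
 *     int fd = open("/proc/xen/privcmd", O_RDWR);
 *     long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *     close(fd);   // on success 'ver' is the value returned by the hypercall itself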
*/ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/public/xenbus.h000066400000000000000000000036731314037446600253430ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Interface to /proc/xen/xenbus. * * Copyright (c) 2008, Diego Ongaro * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __LINUX_PUBLIC_XENBUS_H__ #define __LINUX_PUBLIC_XENBUS_H__ #include typedef struct xenbus_alloc { domid_t dom; __u32 port; __u32 grant_ref; } xenbus_alloc_t; /* * @cmd: IOCTL_XENBUS_ALLOC * @arg: &xenbus_alloc_t * Return: 0, or -1 for error */ #define IOCTL_XENBUS_ALLOC \ _IOC(_IOC_NONE, 'X', 0, sizeof(xenbus_alloc_t)) #endif /* __LINUX_PUBLIC_XENBUS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/sysctl.h000066400000000000000000000002551314037446600240730ustar00rootroot00000000000000#ifndef _XEN_SYSCTL_H #define _XEN_SYSCTL_H /* CTL_XEN names: */ enum { CTL_XEN_INDEPENDENT_WALLCLOCK=1, CTL_XEN_PERMITTED_CLOCK_JITTER=2, }; #endif /* _XEN_SYSCTL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xen-ops.h000066400000000000000000000005351314037446600241440ustar00rootroot00000000000000#ifndef INCLUDE_XEN_OPS_H #define INCLUDE_XEN_OPS_H #include DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_pre_suspend(void); void xen_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); #endif /* INCLUDE_XEN_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xen_proc.h000066400000000000000000000004121314037446600243620ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry_uvp( const char *name, mode_t mode); extern void remove_xen_proc_entry_uvp( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xenbus.h000066400000000000000000000277601314037446600240700ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. 
*/ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* See XBWF_ definitions below. */ unsigned long flags; #endif }; #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 #endif /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { const char *name; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int __must_check __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_frontend(struct xenbus_driver *drv) { return __xenbus_register_frontend_uvp(drv, THIS_MODULE, KBUILD_MODNAME); } int __must_check __xenbus_register_backend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_backend(struct xenbus_driver *drv) { return __xenbus_register_backend_uvp(drv, THIS_MODULE, KBUILD_MODNAME); } void xenbus_unregister_driver_uvp(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start_uvp(struct xenbus_transaction *t); int xenbus_transaction_end_uvp(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) 
__attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather_uvp(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier_uvp(struct notifier_block *nb); void unregister_xenstore_notifier_uvp_uvp(struct notifier_block *nb); int register_xenbus_watch_uvp(struct xenbus_watch *watch); void unregister_xenbus_watch_uvp_uvp(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend_uvp(void); void xenbus_resume_uvp(void); void xenbus_suspend_uvp_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path_uvp(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path_uvp2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #else int xenbus_watch_path_uvpfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) __attribute__ ((format (printf, 4, 5))); #endif /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state_uvp(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. 
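 *
 * A minimal frontend sketch (ring_page, ring_ref and the fail label are
 * illustrative names, not part of this header; in the classic implementation
 * a non-negative return is the grant reference itself, which frontends then
 * advertise to the backend through a ring-ref key in the store):
 *
 *	err = xenbus_grant_ring_uvp(dev, virt_to_mfn(ring_page));
 *	if (err < 0)
 *		goto fail;	(dev is already on its way to Closing)
 *	ring_ref = err;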
*/ int xenbus_grant_ring_uvp(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp_valloc_uvp allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring_uvp_uvp does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_uvp_uvp_vfree_uvp if you mapped in your memory with * xenbus_map_ring_uvp_uvp_valloc_uvp (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring_uvp_uvp(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn_uvp(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn_uvp(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_uvp_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error_uvp(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error_uvp(dev, err, fmt, args), followed by * xenbus_switch_state_uvp(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal_uvp(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate_uvp(enum xenbus_state state); int xenbus_dev_is_online_uvp(struct xenbus_device *dev); int xenbus_frontend_closed_uvp(struct xenbus_device *dev); int xenbus_for_each_backend_uvp(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend_uvp(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xencomm.h000066400000000000000000000050611314037446600242200ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. */ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); #if 0 #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ struct xencomm_mini xc_desc ## _base[(n)] \ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ struct xencomm_mini *xc_desc = &xc_desc ## _base[0]; #else /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #endif #define xencomm_map_no_alloc(ptr, bytes) \ ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xencons.h000066400000000000000000000007141314037446600242270ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. */ void xencons_rx(char *buf, unsigned len); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/include/xen/xenoprof.h000066400000000000000000000025701314037446600244140ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN_no #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/version.ini000066400000000000000000000000641314037446600223500ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/000077500000000000000000000000001314037446600224005ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/Kbuild000066400000000000000000000002551314037446600235370ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/Makefile000066400000000000000000000000661314037446600240420ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/balloon.c000066400000000000000000000532471314037446600242050ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN_no /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
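 * Pages on the ballooned list have already been handed back to Xen:
 * balloon_append() marked them PageReserved and set their p2m entry to
 * INVALID_P2M_ENTRY, so a page pulled off the list here only becomes usable
 * again once increase_reservation() below installs a fresh machine frame
 * for it.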
*/ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. */ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN_no #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN_no #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN_no /* Link back into the page tables if not highmem. 
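 * In a PV guest every lowmem pfn also has a fixed virtual address in the
 * kernel's linear mapping, so once set_phys_to_machine() has pointed the pfn
 * at its new mfn the matching PTE has to be rewritten as well (the
 * HYPERVISOR_update_va_mapping call below). Highmem pages have no permanent
 * kernel mapping and need no such fixup.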
*/ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /* totalram_pages = bs.current_pages; */ totalram_pages = bs.current_pages - totalram_bias; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN_no ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES_uvp else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN_no /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ totalram_pages = bs.current_pages - totalram_bias; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write_uvp(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); //if (!need_sleep) //{ if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write_uvp(XBT_NIL, "memory", "target", buffer); } xenbus_write_uvp(XBT_NIL, "control/uvp", "Balloon_flag", "0"); //} /* Schedule more work if there is some still to be done. 
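 * (The stock balloon driver re-arms balloon_timer at this point to retry
 * later; this UVP variant leaves that path commented out and instead reports
 * the final size back through the memory/target and control/uvp/Balloon_flag
 * keys written above.)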
*/ //if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf_uvp(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch_uvp(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN_no)*/ #if !defined(CONFIG_XEN_no) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN_no pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? 
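	/*
	 * Probing sketch: the inner hypercall only tests whether the
	 * hypervisor implements XENMEM_get_pod_target at all.  If it does,
	 * the outer call asks for the maximum reservation (appropriate for
	 * populate-on-demand guests); otherwise it falls back to the current
	 * reservation.  Either way the result seeds totalram_bias below.
	 */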
XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry_uvp("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN_no) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance_uvp_uvp(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance_uvp_uvp); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN_no static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec_uvp(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN_no ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
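 * Without CONFIG_XEN the dealloc_pte_fn path above is unavailable, so if the
 * guest is not auto-translated there is no way to hand the frame back from
 * here; the attempt is treated as a failure and unwound via the err path.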
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN_no flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec_uvp); #endif /* CONFIG_XEN_BACKEND */ static void _free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], !free_vec); } if (!free_vec) totalram_pages = bs.current_pages -= nr_pages; balloon_unlock(flags); if (free_vec) kfree(pagevec); schedule_work(&balloon_worker); } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec_uvp(pagevec, nr_pages, 1); } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec_uvp); #endif /* CONFIG_XEN_BACKEND */ void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec_uvp(pagevec, nr_pages, 0); } void balloon_release_driver_page_uvp_uvp(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page_uvp_uvp); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/common.h000066400000000000000000000043741314037446600240510ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/scrub.c000066400000000000000000000010461314037446600236630ustar00rootroot00000000000000#include #include #include void scrub_pages(void *v, unsigned int count) { if (likely(cpu_has_xmm2)) { unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4); for (; n--; v += sizeof(long) * 4) asm("movnti %1,(%0)\n\t" "movnti %1,%c2(%0)\n\t" "movnti %1,2*%c2(%0)\n\t" "movnti %1,3*%c2(%0)\n\t" : : "r" (v), "r" (0L), "i" (sizeof(long)) : "memory"); asm volatile("sfence" : : : "memory"); } else for (; count--; v += PAGE_SIZE) clear_page(v); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-balloon/sysfs.c000066400000000000000000000127521314037446600237220ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
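/*
 * Usage sketch (paths inferred from BALLOON_CLASS_NAME "xen_memory" and the
 * sysdev id of 0; verify on the running kernel): the balloon target can be
 * driven from user space with
 *
 *	echo 524288 > /sys/devices/system/xen_memory/xen_memory0/target_kb
 *
 * which lands in store_target_kb() and balloon_set_new_target(), while the
 * read-only counters generated by BALLOON_SHOW() appear under
 * /sys/devices/system/xen_memory/xen_memory0/info/.
 */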
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/000077500000000000000000000000001314037446600233475ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600245060ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/Makefile000066400000000000000000000000661314037446600250110ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/evtchn.c000066400000000000000000000213151314037446600250040ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler_uvp()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler_uvp); void notify_remote_via_irq_uvp(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq_uvp); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/features.c000066400000000000000000000015031314037446600253300ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features_uvp[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. 
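 * (The xen_feature() accessor in the features header is just an array lookup
 * into this map, so effectively every Xen-aware module ends up referencing
 * the symbol, hence the plain EXPORT_SYMBOL below.)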
*/ EXPORT_SYMBOL(xen_features_uvp); void xen_setup_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features_uvp[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_uvp); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_uvp_ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & 
(GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access_uvp); int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_uvp_ref_uvp); void gnttab_end_foreign_access_uvp(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_uvp_ref_uvp(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_uvp); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_uvp_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_uvp); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_uvp_ref); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
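 * Xen stores the transferred frame into shared[ref].frame before it raises
 * GTF_transfer_completed, so the rmb() below pairs with that ordering;
 * without it this CPU could still read a stale frame value even after having
 * observed the completed flag.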
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_uvp_ref_uvp); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_uvp_ref_uvp(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_uvp); void gnttab_free_grant_reference_uvp(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_uvp); void gnttab_free_grant_reference_uvps(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_uvps); int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references_uvp); int gnttab_empty_grant_references_uvp(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references_uvp); int gnttab_claim_grant_reference_uvp(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference_uvp); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference_uvp); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback_uvp); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback_uvp); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * 
nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN_no static DEFINE_SEQLOCK(gnttab_dma_lock); #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page_uvp(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page_uvp(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
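 *
 * This check is what the seqlock protects: __gnttab_dma_map_page() pins a
 * foreign page by raising its _mapcount inside a read_seqbegin() /
 * read_seqretry() loop on gnttab_dma_lock, so either that pin is visible
 * here (page_mapped() is true and the copy bails out with -EBUSY below),
 * or the DMA path retries and sees the replacement page instead.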
*/ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page_uvp); void gnttab_reset_grant_page_uvp(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page_uvp); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. 
#endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ static int gnttab_resume(struct sys_device *dev) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } #define gnttab_resume() gnttab_resume(NULL) #ifdef CONFIG_PM_SLEEP #ifdef CONFIG_X86 static int gnttab_suspend(struct sys_device *dev, pm_message_t state) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static struct sysdev_class gnttab_sysclass = { .name = "gnttab", .resume = gnttab_resume, .suspend = gnttab_suspend, }; static struct sys_device device_gnttab = { .id = 0, .cls = &gnttab_sysclass, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN_no) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) { int err = sysdev_class_register(&gnttab_sysclass); if (!err) err = sysdev_register(&device_gnttab); if (err) return err; } #endif nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
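 *
 * Rough worked example (assuming 4 KiB pages, 8-byte v1 grant entries and
 * a 4-byte grant_ref_t; the real values come from ENTRIES_PER_GRANT_FRAME
 * and nr_freelist_frames()): each grant frame then holds 4096 / 8 = 512
 * entries, so a hypervisor limit of 32 grant frames gives 32 * 512 = 16384
 * references, and the free list needs 16384 / (4096 / 4) = 16 pages of
 * grant_ref_t.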
*/ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN_no) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_UNUSED1) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN_no core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/machine_reboot.c000066400000000000000000000043131314037446600264720ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space immediately * after resume. */ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend_uvp(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler.
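 *
 * The sequence below is: disable the platform PCI interrupt, park every
 * AP in ap_suspend() (via info.do_spin / info.nr_spinning), issue the
 * actual HYPERVISOR_suspend from bp_suspend() on the boot CPU with IRQs
 * off, report through resume_notifier() whether the suspend was
 * cancelled, release the spinning APs, re-enable the interrupt, and
 * finally resume or cancel the xenbus side.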
*/ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_uvp_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume_uvp(); } else xenbus_suspend_uvp_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/panic-handler.c000066400000000000000000000016561314037446600262300ustar00rootroot00000000000000#include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... */ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/platform-compat.c000066400000000000000000000067611314037446600266320ustar00rootroot00000000000000#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. 
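 *
 * For example, init_hypercall_stubs() in platform-pci.c relies on this to
 * turn each page of the __vmalloc()ed hypercall area into a PFN it can
 * program into the hypercall MSR:
 *
 *   pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE);
 *   wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);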
*/ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600274250ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. * Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). 
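 *
 * The end result is a simple bit mask: with both frontends built, the
 * function writes UNPLUG_ALL_NICS | UNPLUG_ALL_IDE_DISKS (2 | 1 = 3) to
 * the 2-byte unplug register XEN_IOPORT_UNPLUG (I/O port 0x10), asking
 * the device model to remove the corresponding emulated devices before
 * the PV drivers attach, unless UNPLUG_IGNORE is set.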
*/ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated NICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/platform-pci.c000066400000000000000000000240621314037446600261140ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* The driver writes a xenstore flag to tell Xen which features are supported. */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /* 2012-7-9: write monitor-service-flag as true. */ void write_monitor_service_flag() { int rc; rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949.
driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features_uvp(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret 
= xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/platform-pci.h000066400000000000000000000026621314037446600261230ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/reboot.c000066400000000000000000000176301314037446600250140ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN_no sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN_no */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. 
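 *
 * This check sits inside a lock-free retry loop: cmpxchg() only moves
 * shutting_down forward from the value we last observed, and the loop
 * repeats until either the transition lands or another path has already
 * driven the state machine into an active state.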
*/ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; str = (char *)xenbus_read_uvp(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end_uvp(xbt, 1); return; } /* Do not clear control/shutdown here: if a suspend fails during live migration, clearing the node can cause the migration to time out. */ //xenbus_write_uvp(xbt, "control", "shutdown", ""); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; if (!xenbus_scanf_uvp(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end_uvp(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf_uvp(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler_uvp(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write_uvp(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf_uvp(XBT_NIL, "control",
"platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch_uvp(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch_uvp(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN_no static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN_no) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN_no) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/reboot_64.c000066400000000000000000000174641314037446600253320ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN_no sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; str = (char *)xenbus_read_uvp(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
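 *
 * A failed or empty read simply means no request is pending.  When a
 * request is present, the toolstack has written one of "poweroff",
 * "reboot", "suspend" or "halt" into control/shutdown; the node is
 * cleared below to acknowledge it and the matching shutdown state is
 * selected.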
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end_uvp(xbt, 1); return; } xenbus_write_uvp(xbt, "control", "shutdown", ""); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; if (!xenbus_scanf_uvp(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end_uvp(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf_uvp(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler_uvp(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write_uvp(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf_uvp(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch_uvp(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch_uvp(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN_no static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN_no) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN_no) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xen_proc.c000066400000000000000000000010571314037446600253330ustar00rootroot00000000000000 #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry_uvp(const char *name, mode_t 
mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", NULL)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry_uvp); void remove_xen_proc_entry_uvp(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xen_support.c000066400000000000000000000041171314037446600261040ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance_uvp_uvp(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance_uvp_uvp); void balloon_release_driver_page_uvp_uvp(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page_uvp_uvp); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_backend_client.c000066400000000000000000000113351314037446600300270ustar00rootroot00000000000000/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. 
* * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp_valloc_uvp); int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp_vfree_uvp); int xenbus_unmap_ring_uvp_uvp(struct 
xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp); int xenbus_dev_is_online_uvp(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online_uvp); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_client.c000066400000000000000000000430241314037446600263600ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate_uvp(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? 
name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate_uvp); /** * xenbus_watch_path_uvp - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path_uvp(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch_uvp(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal_uvp(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvp); #if defined(CONFIG_XEN_no) || defined(MODULE) int xenbus_watch_path_uvp2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path_uvp(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvp2); #else /** * xenbus_watch_path_uvpfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path_uvpfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path_uvp(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvpfmt); #endif /** * xenbus_switch_state_uvp * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state_uvp(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. 
Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf_uvp(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal_uvp(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state_uvp); int xenbus_frontend_closed_uvp(struct xenbus_device *dev) { xenbus_switch_state_uvp(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed_uvp); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write_uvp(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error_uvp * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error_uvp(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error_uvp); /** * xenbus_dev_fatal_uvp * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error_uvp(dev, err, fmt, args), followed by * xenbus_switch_state_uvp(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal_uvp(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state_uvp(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal_uvp); /** * xenbus_grant_ring_uvp * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. 
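 *
 * A minimal usage sketch (hypothetical frontend code, not part of this
 * driver; "info" and its fields are illustrative names only):
 *
 *	err = xenbus_grant_ring_uvp(dev, virt_to_mfn(info->ring));
 *	if (err < 0)
 *		goto fail;
 *	info->ring_ref = err;
 *
 * On success the non-negative return value is the grant reference, which
 * the frontend would typically publish to the backend via xenbus_printf_uvp.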
*/ int xenbus_grant_ring_uvp(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access_uvp(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal_uvp(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring_uvp); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn_uvp(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal_uvp(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn_uvp); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * Bind to an existing interdomain event channel in another domain. Returns 0 * on success and stores the local port in *port. On error, returns -errno, * switches the device to XenbusStateClosing, and saves the error in XenStore. */ int xenbus_bind_evtchn_uvp(struct xenbus_device *dev, int remote_port, int *port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = dev->otherend_id; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); if (err) xenbus_dev_fatal_uvp(dev, err, "binding to event channel %d from domain %d", remote_port, dev->otherend_id); else *port = bind_interdomain.local_port; return err; } EXPORT_SYMBOL_GPL(xenbus_bind_evtchn_uvp); #endif /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn_uvp(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error_uvp(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn_uvp); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * xenbus_map_ring_uvp_uvp_valloc_uvp * @dev: xenbus device * @gnt_ref: grant reference * @vaddr: pointer to address to be filled out by mapping * * Based on Rusty Russell's skeleton driver's map_page. * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp_valloc_uvp allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. 
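 *
 * Hedged example (hypothetical backend code; "ring_ref" and "be" are
 * illustrative names only):
 *
 *	void *ring_addr;
 *	err = xenbus_map_ring_uvp_uvp_valloc_uvp(dev, ring_ref, &ring_addr);
 *	if (err)
 *		return err;
 *	be->sring = ring_addr;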
*/ int xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; struct vm_struct *area; *vaddr = NULL; area = xen_alloc_vm_area(PAGE_SIZE); if (!area) return -ENOMEM; op.host_addr = (unsigned long)area->addr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xen_free_vm_area(area); xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); return op.status; } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; *vaddr = area->addr; return 0; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp_valloc_uvp); /** * xenbus_map_ring_uvp_uvp * @dev: xenbus device * @gnt_ref: grant reference * @handle: pointer to grant handle to be filled * @vaddr: address to be mapped to * * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op = { .host_addr = (unsigned long)vaddr, .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp); /** * xenbus_unmap_ring_uvp_uvp_vfree_uvp * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_uvp_uvp_vfree_uvp if you mapped in your memory with * xenbus_map_ring_uvp_uvp_valloc_uvp (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, void *vaddr) { struct vm_struct *area; struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) * method so that we don't have to muck with vmalloc internals here. * We could force the user to hang on to their struct vm_struct from * xenbus_map_ring_uvp_uvp_valloc_uvp, but these 6 lines considerably simplify * this API. 
*/ read_lock(&vmlist_lock); for (area = vmlist; area != NULL; area = area->next) { if (area->addr == vaddr) break; } read_unlock(&vmlist_lock); if (!area) { xenbus_dev_error_uvp(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } op.handle = (grant_handle_t)area->phys_addr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) xen_free_vm_area(area); else xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp_vfree_uvp); /** * xenbus_unmap_ring_uvp_uvp * @dev: xenbus device * @handle: grant handle * @vaddr: addr to unmap * * Unmap a page of memory in this domain that was imported from another domain. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, .handle = handle, }; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp); #endif /** * xenbus_read_uvp_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_uvp_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather_uvp(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_uvp_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_comms.c000066400000000000000000000157531314037446600262300ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(struct work_struct *); static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. 
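   The ring indexes live in the shared page and may be updated by the
   other end at any time, so they are copied into locals and sanity-checked
   (check_indexes) before any chunk length is computed from them.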
*/ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN_no) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler_uvp(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_comms.h000066400000000000000000000043301314037446600262220ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. 
*/ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_dev.c000066400000000000000000000253761314037446600256720ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include //#include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); 
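		/*
		 * Tie the new watch adapter to this open file handle and
		 * register it with the xenbus core; watch_fired() will queue
		 * XS_WATCH_EVENT messages onto u->read_buffers for the
		 * user-space reader.
		 */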
watch->dev_data = u; err = register_xenbus_watch_uvp(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch_uvp_uvp(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end_uvp(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch_uvp_uvp(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef HAVE_UNLOCKED_IOCTL static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { extern int xenbus_conn(domid_t remote_dom, int *grant_ref, evtchn_port_t *local_port); void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; 
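				/*
				 * Copying the result back to user space failed:
				 * return the xenstored state machine to
				 * XENBUS_XSD_UNCOMMITTED so a later
				 * IOCTL_XENBUS_ALLOC can be retried.
				 */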
atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, #ifdef HAVE_UNLOCKED_IOCTL .unlocked_ioctl = xenbus_dev_ioctl #endif }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry_uvp("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_probe.c000066400000000000000000001033661314037446600262170ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #include #include #include #include #ifdef MODULE #include #endif #else #include #include #include #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL(xen_store_evtchn); #endif struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state); static int xenbus_dev_resume(struct device *dev); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch_uvp_uvp(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather_uvp(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal_uvp(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists_uvp(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal_uvp(xendev, -ENOENT, "unable to read other end from %s. 
" "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN_no) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif #if !defined(CONFIG_XEN_no) && !defined(MODULE) .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, #endif }, #if defined(CONFIG_XEN_no) || defined(MODULE) .dev = { .init_name = "xen", }, #endif }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_uvp_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate_uvp(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. 
*/ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed_uvp(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN_no) || defined(MODULE) return xenbus_watch_path_uvp2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else return xenbus_watch_path_uvpfmt(dev, &dev->otherend_watch, otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error_uvp(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state_uvp(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state_uvp(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate_uvp(dev->state)); goto out; } xenbus_switch_state_uvp(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common2(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = owner; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) drv->driver.mod_name = mod_name; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name) 
{ int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common2(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; // If this driver is loaded as a module wait for devices to attach. wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend_uvp); void xenbus_unregister_driver_uvp(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver_uvp); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_uvp_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
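   The nodename and devicetype strings share the allocation with the
   xenbus_device itself, immediately after the structure, so the single
   kfree() in xenbus_dev_release() frees everything.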
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN_no) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory_uvp(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory_uvp(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed_uvp(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists_uvp(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... 
*/ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL_GPL(xenbus_dev_changed_uvp); #endif static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed_uvp(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state) #else static int suspend_dev(struct device *dev, void *data) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) #if !defined(CONFIG_XEN_no) && !defined(MODULE) err = drv->suspend(xdev, state); #else err = drv->suspend(xdev); #endif if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } #if defined(CONFIG_XEN_no) || defined(MODULE) static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } #endif #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_resume(struct device *dev) #else static int resume_dev(struct device *dev, void *data) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev_name(dev), err); return err; } return 0; } #if defined(CONFIG_XEN_no) || defined(MODULE) void xenbus_suspend_uvp(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend_uvp); void xen_unplug_emulated_devices(void); void xenbus_resume_uvp(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume_uvp); void xenbus_suspend_uvp_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); 
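	/*
	 * Reuse the backend iterator to give backend devices (if any are
	 * compiled in) the same suspend_cancel callback.
	 */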
xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_uvp_cancel); #endif /* A flag to determine if xenstored is 'ready' (i.e. has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int register_xenstore_notifier_uvp(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier_uvp); void unregister_xenstore_notifier_uvp_uvp(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier_uvp_uvp); void xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch_uvp(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #if defined(CONFIG_XEN_no) || defined(MODULE) static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; close.port = port; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) remove_xen_proc_entry_uvp("xsd_kva"); remove_xen_proc_entry_uvp("xsd_port"); #endif rc = xb_free_port(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access_uvp(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = xb_free_port(xen_store_evtchn); if (rc2 != 0) printk(KERN_WARNING "XENBUS: Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return 
rc; } #endif #ifndef MODULE static int __init xenbus_probe_init(void) #else static int __devinit xenbus_probe_init(void) #endif { int err = 0; #if defined(CONFIG_XEN_no) || defined(MODULE) unsigned long page = 0; #endif DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { #if defined(CONFIG_XEN_no) || defined(MODULE) struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry_uvp("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry_uvp("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif #else /* dom0 not yet supported */ #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN_no) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN_no) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } #endif xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #if defined(CONFIG_XEN_COMPAT_XENFS_uvp) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: #if defined(CONFIG_XEN_no) || defined(MODULE) if (page) free_page(page); #endif /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
*/ return err; } #ifndef MODULE postcore_initcall(xenbus_probe_init); #ifdef CONFIG_XEN_no MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #else int __devinit xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_uvp_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 150 - seconds_waited); if (seconds_waited == 150) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("seconds_waited\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend_uvp(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_probe.h000066400000000000000000000071771314037446600262270ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; #if defined(CONFIG_XEN_no) || defined(MODULE) struct device dev; #endif }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name); int __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed_uvp(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_probe_backend.c000066400000000000000000000170001314037446600276530ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather_uvp(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists_uvp(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static struct device_attribute xenbus_backend_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, .dev_attrs = xenbus_backend_attrs, }, .dev = { .init_name = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } int __xenbus_register_backend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend, owner, mod_name); } EXPORT_SYMBOL_GPL(__xenbus_register_backend_uvp); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = 
kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory_uvp(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed_uvp(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch_uvp(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend_uvp(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-platform-pci/xenbus_xs.c000066400000000000000000000541211314037446600255340ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
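 * Note that the reply is not matched to the request by req_id here; the
 * first message queued on reply_list is taken as the answer.  That is
 * safe only because every sender in this file holds
 * xs_state.request_mutex across the write and the read, so at most one
 * request is outstanding at a time (see the struct xs_handle comment).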
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. 
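 * An empty name yields a copy of dir alone, without a trailing slash.
 * The GFP_NOIO | __GFP_HIGH flags match the other xenstore allocations
 * in this file, presumably because these calls can sit on the path that
 * completes block I/O, so reclaim must not start new I/O.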
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory_uvp); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory_uvp(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists_uvp); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read_uvp); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write_uvp); /* Create a new directory. */ int xenbus_mkdir_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir_uvp); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm_uvp); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
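 * transaction_start() below briefly takes xs_state.transaction_mutex
 * before bumping transaction_count, so a suspend in progress (which
 * holds that mutex and waits for the count to reach zero) blocks new
 * transactions; the count drops again in xenbus_transaction_end_uvp(),
 * or right here if XS_TRANSACTION_START fails.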
*/ int xenbus_transaction_start_uvp(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start_uvp); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end_uvp(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end_uvp); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read_uvp(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf_uvp); /* Single printf and write: returns -errno or 0. */ int xenbus_printf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write_uvp(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf_uvp); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather_uvp(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read_uvp(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather_uvp); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch_uvp(struct xenbus_watch *watch) { /* Pointer in ascii is the token. 
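 * Two hex digits per pointer byte plus a terminating NUL fit in the
 * sizeof(watch) * 2 + 1 buffer below; find_watch() converts the token
 * back into the pointer with simple_strtoul(..., 16) when xenstored
 * delivers a watch event.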
*/ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch_uvp); void unregister_xenbus_watch_uvp_uvp(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN_no) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch_uvp_uvp); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN_no) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #if defined(CONFIG_XEN_no) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN_no) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. 
kthread_run can block which can deadlock * against unregister_xenbus_watch_uvp_uvp() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/000077500000000000000000000000001314037446600217135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/Kbuild000066400000000000000000000001241314037446600230450ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-scsi.o xen-scsi-objs := scsifront.o xenbus.o 
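/*
 * Illustrative sketch (not part of the driver): the split() helper in
 * xenbus_xs.c above packs an XS_DIRECTORY reply -- a block of
 * NUL-separated names -- together with the array of pointers into it in
 * one allocation, so a single kfree() releases both.  The userspace
 * re-creation below mirrors that layout with malloc()/free(); the sample
 * reply and main() are made up for demonstration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Count the NUL-terminated strings packed into 'len' bytes. */
static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;
	return num;
}

/*
 * Reserve one pointer slot per string plus a sentinel, copy the string
 * block right behind the pointer array, then point each slot at its
 * entry.  The sentinel slot points just past the copied data.
 */
static char **split(const char *strings, unsigned int len, unsigned int *num)
{
	char *p, *copy, **ret;

	*num = count_strings(strings, len) + 1;
	ret = malloc(*num * sizeof(char *) + len);
	if (!ret)
		return NULL;

	copy = (char *)&ret[*num];
	memcpy(copy, strings, len);

	for (p = copy, *num = 0; p < copy + len; p += strlen(p) + 1)
		ret[(*num)++] = p;
	ret[*num] = copy + len;
	return ret;
}

int main(void)
{
	/* A fake directory reply; the device names are invented. */
	const char reply[] = "vif\0vbd\0console";
	unsigned int i, n;
	char **entries = split(reply, sizeof(reply), &n);

	if (!entries)
		return 1;
	for (i = 0; i < n; i++)
		printf("entry %u: %s\n", i, entries[i]);
	free(entries);
	return 0;
}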
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/Makefile000066400000000000000000000000661314037446600233550ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/common.h000066400000000000000000000100541314037446600233540ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/scsifront.c000066400000000000000000000265311314037446600241000ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free > VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = 0x0fff; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].next_free = info->shadow_free; info->shadow[id].req_scsi_cmnd = 0; info->shadow_free = id; spin_unlock_irqrestore(&info->shadow_lock, flags); } struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq_uvp(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id) { int i; if (s->sc_data_direction == DMA_NONE) return; if (s->nr_segments) { for (i = 0; i < s->nr_segments; i++) { if (unlikely(gnttab_query_foreign_access_uvp( s->gref[i]) != 0)) { printk(KERN_ALERT "scsifront: " "grant still in use by backend.\n"); BUG(); } gnttab_end_foreign_access_uvp(s->gref[i], 0UL); } } return; } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd; if (sc == NULL) BUG(); scsifront_gnttab_done(&info->shadow[id], id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; info->shadow[id].rslt_reset = ring_res->rslt; spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } int scsifront_cmd_done(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; unsigned long flags; spin_lock_irqsave(&info->io_lock, flags); rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, 
more_to_do); } else { info->ring.sring->rsp_event = i + 1; } spin_unlock_irqrestore(&info->io_lock, flags); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id) { grant_ref_t gref_head; struct page *page; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned long buffer_pfn; if (sc->sc_data_direction == DMA_NONE) return 0; err = gnttab_alloc_grant_references_uvp(VSCSIIF_SG_TABLESIZE, &gref_head); if (err) { printk(KERN_ERR "scsifront: gnttab_alloc_grant_references_uvp() error\n"); return -ENOMEM; } if (scsi_bufflen(sc)) { /* quoted scsi_lib.c/scsi_req_map_sg . */ struct scatterlist *sg, *sgl = scsi_sglist(sc); unsigned int data_len = scsi_bufflen(sc); nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } for_each_sg (sgl, sg, scsi_sg_count(sc), i) { page = sg_page(sg); off = sg->offset; len = sg->length; buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); ref = gnttab_claim_grant_reference_uvp(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_uvp_ref(ref, info->dev->otherend_id, buffer_pfn, write); info->shadow[id].gref[ref_cnt] = ref; ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; ring_req->seg[ref_cnt].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } big_to_sg: gnttab_free_grant_reference_uvps(gref_head); return ref_cnt; } static int scsifront_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; int ref_cnt; uint16_t rqid; if (RING_FULL(&info->ring)) { goto out_host_busy; } sc->scsi_done = done; sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].act = ring_req->act; ref_cnt = map_data_for_request(info, sc, ring_req, rqid); if (ref_cnt < 0) { add_id_to_freelist(info, rqid); if (ref_cnt == (-ENOMEM)) goto out_host_busy; else { sc->result = (DID_ERROR << 16); goto out_fail_command; } } ring_req->nr_segments = (uint8_t)ref_cnt; 
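/*
 * map_data_for_request() has granted the pages backing the scatterlist
 * to the backend and filled ring_req->seg[]; keep the same segment count
 * in the shadow entry so scsifront_gnttab_done() can revoke exactly
 * those grant references when the response comes back.
 */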
info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); return 0; out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(sc); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; uint16_t rqid; int err; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; ring_req->channel = sc->device->channel; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->cmd_len = sc->cmd_len; if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); ring_req->nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-scsi/xenbus.c000066400000000000000000000242051314037446600233660ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif extern struct scsi_host_template scsifront_sht; static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access_uvp(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler_uvp(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal_uvp(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal_uvp(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal_uvp(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); } err = xenbus_printf_uvp(xbt, dev->nodename, "ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal_uvp(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf_uvp(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal_uvp(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end_uvp(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = 
scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal_uvp(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state_uvp(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
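 * scsi_remove_host() detaches the host from the SCSI midlayer, and
 * xenbus_frontend_closed_uvp() then completes the close handshake by
 * switching the frontend to XenbusStateClosed.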
*/ scsi_remove_host(host); xenbus_frontend_closed_uvp(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory_uvp(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_uvp_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state_uvp(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state_uvp(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state_uvp(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver_uvp(&scsifront_driver); } 
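/*
 * Note on the xenstore layout used by this frontend (paths as written and
 * read by the code above):
 *
 *   under dev->nodename (frontend):
 *     ring-ref              grant reference of the shared vscsiif ring
 *     event-channel         port returned by irq_to_evtchn_port(info->irq)
 *     vscsi-devs/<d>/state  per-device state acknowledged by the frontend
 *
 *   under dev->otherend (backend):
 *     vscsi-devs/<d>/state  XenbusStateInitialised/Closing drive LUN hotplug
 *     vscsi-devs/<d>/v-dev  "host:channel:target:lun" of the virtual device
 *
 * scsifront_do_lun_hotplug() walks the backend's vscsi-devs directory and
 * calls scsi_add_device()/scsi_remove_device() accordingly, writing the
 * frontend copy of each state node (Connected or Closed) as acknowledgement.
 */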
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/000077500000000000000000000000001314037446600215255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/Kbuild000066400000000000000000000001231314037446600226560ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o vcd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/Makefile000066400000000000000000000000661314037446600231670ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/blkfront.c000066400000000000000000000616271314037446600235260ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write_uvp(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal_uvp(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. 
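 * talk_to_backend() creates the shared ring and event channel via
 * setup_blkring(), then, inside a xenbus transaction that is retried on
 * -EAGAIN, writes the ring-ref, event-channel and protocol nodes before
 * switching the device to XenbusStateInitialised.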
*/ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf_uvp(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state_uvp(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end_uvp(xbt, 1); if (message) xenbus_dev_fatal_uvp(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal_uvp(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: if (!info->gd) { xenbus_frontend_closed_uvp(dev); break; } bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal_uvp(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error_uvp(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). 
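 * connect() reads sectors, info and sector-size (plus the optional
 * feature-barrier node) from the backend, creates the gendisk via
 * xlvbd_add(), adds the sysfs attributes, switches the device to
 * XenbusStateConnected, kicks any pending requests and finally calls
 * add_disk() and register_vcd().  If the ring is already connected it only
 * updates the capacity from the backend's sectors node.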
*/ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf_uvp(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state_uvp(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback_uvp(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. 
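 * Once the workqueue has been flushed, blkfront_closing() removes the sysfs
 * attributes, unregisters the virtual cdrom and deletes the gendisk before
 * signalling XenbusStateClosed via xenbus_frontend_closed_uvp().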
*/ flush_scheduled_work(); xlvbd_sysfs_delif(info); unregister_vcd(info); xlvbd_del(info); out: xenbus_frontend_closed_uvp(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq_uvp(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; info->users++; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
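 * On the final release, re-read the backend state: if it has already moved
 * to XenbusStateClosing and the device is ready, finish the deferred close
 * by calling blkfront_closing().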
*/ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_uvp_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = bd->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references_uvp( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback_uvp( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference_uvp(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_uvp_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_reference_uvps(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if (!blk_fs_request(req) && !blk_pc_request(req)) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); ret = bret->status == BLKIF_RSP_OKAY ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback_uvp(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access_uvp(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler_uvp(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access_uvp(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_uvp_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ? 
GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state_uvp(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver_uvp(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/block.h000066400000000000000000000123211314037446600227670ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) 
pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/vbd.c000066400000000000000000000266021314037446600224520ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "block.h" #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) /*llj: the major of vitual_device_ext */ #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... 
SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) blk_queue_max_sectors(rq, 512); #else blk_queue_max_hw_sectors(rq, 512); #endif /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); #else blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); #endif /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { // this is above the extended range; something is wrong printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = 
gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vbd/vcd.c000066400000000000000000000313671314037446600224570ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
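 *
 * The CDROM packets are tunnelled through blkfront: submit_cdrom_cmd()
 * marshals a struct packet_command into a xen_cdrom_packet on a freshly
 * allocated page (command, sense and data buffer at fixed offsets) and
 * submit_message() pushes that page to the backend as a packet-command
 * block request built with blk_rq_map_kern() and blk_execute_rq().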
*/ #define REVISION "$Revision: 1.0 $" #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->__data_len = PAGE_SIZE; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; size_t size; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } size = PAGE_SIZE; memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) { memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); } if (cgc->buffer && cgc->buflen) { memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); } __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: 
__free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { int ret; struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { int ret = 0; struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { int ret = -EIO; struct blkfront_info *info; info = cdi->disk->private_data; ret = submit_cdrom_cmd(info, cgc); cgc->stat = ret; return ret; } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif info->users = vcd->vcd_cdrom_info.use_count; spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info 
= gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { info->users = 1; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: ret = submit_cdrom_cmd(info, (struct packet_command *)arg); break; default: /* Not supported, augment supported above if necessary */ printk("%s():%d Unsupported IOCTL:%x \n", __func__, __LINE__, cmd); ret = -ENOTTY; break; } spin_unlock(&vcd->vcd_cdrom_info_lock); out: return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; int ret = 0; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); if (ret_vcd) { ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); } return ret; } static struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) { goto out; } /* Create new vcd_disk and fill in cdrom_info */ vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); if (!vcd) { printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } xencdrom_bdops.owner = gd->fops->owner; gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; 
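	/* err_out: register_cdrom() failed above, release the vcd_disk again. */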
err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/000077500000000000000000000000001314037446600217145ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/Kbuild000066400000000000000000000001411314037446600230450ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/Makefile000066400000000000000000000000661314037446600233560ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/accel.c000066400000000000000000000534431314037446600231400ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. 
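 * Entries are added by init_accelerator() with accelerator_mutex held; each
 * accelerator also keeps its own vif_states list, protected by its
 * vif_states_lock spinlock.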
*/ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read_uvp(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded_uvp() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path_uvp2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch_uvp_uvp(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
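 * It may sleep because napi_disable() and netif_tx_lock_bh() are used to
 * quiesce the receive and transmit paths before the hooks pointer is
 * swapped under vif_states_lock.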
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
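 * The version argument implements a small handshake: if the plugin was
 * built against a newer NETFRONT_ACCEL_VERSION, netfront returns its own
 * version number so the plugin can decide whether to re-register with a
 * compatible value; a plugin built against an older version is rejected
 * with -EPROTO.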
*/ int netfront_accelerator_loaded_uvp(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded_uvp); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
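 *
 * A minimal sketch of how a plugin might pair the load/unload calls,
 * assuming a statically defined struct netfront_accel_hooks hooks; the
 * frontend string "example_accel" is hypothetical and used only for
 * illustration:
 *
 *	static int __init accel_init(void)
 *	{
 *		return netfront_accelerator_loaded_uvp(NETFRONT_ACCEL_VERSION,
 *						       "example_accel", &hooks);
 *	}
 *
 *	static void __exit accel_exit(void)
 *	{
 *		netfront_accelerator_stop_uvp("example_accel");
 *	}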
*/ void netfront_accelerator_stop_uvp(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop_uvp); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/acpi_bus.h000066400000000000000000000261731314037446600236630ustar00rootroot00000000000000/* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ #include #include /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; acpi_handle handles[ACPI_MAX_HANDLES]; }; /* acpi_utils.h */ acpi_status acpi_extract_package(union acpi_object *package, struct acpi_buffer *format, struct acpi_buffer *buffer); acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); acpi_status acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); #ifdef CONFIG_ACPI #include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; enum acpi_bus_removal_type { ACPI_BUS_REMOVAL_NORMAL = 0, ACPI_BUS_REMOVAL_EJECT, ACPI_BUS_REMOVAL_SUPRISE, ACPI_BUS_REMOVAL_TYPE_COUNT }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT }; struct acpi_driver; struct acpi_device; /* * ACPI Driver * ----------- */ typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); struct acpi_bus_ops { u32 acpi_op_add:1; u32 acpi_op_start:1; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_notify notify; }; #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ struct acpi_driver { char name[80]; char class[80]; const struct acpi_device_id *ids; /* Supported Hardware IDs */ unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; }; /* * ACPI Device * ----------- */ /* Status (_STA) */ struct acpi_device_status { u32 present:1; u32 enabled:1; u32 show_in_ui:1; u32 functional:1; u32 battery_present:1; u32 reserved:27; }; /* Flags */ struct acpi_device_flags { u32 dynamic_status:1; u32 bus_address:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? 
*/ u32 force_power_state:1; u32 reserved:22; }; /* File System */ struct acpi_device_dir { struct proc_dir_entry *entry; }; #define acpi_device_dir(d) ((d)->dir.entry) /* Plug and Play */ typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_hardware_id { struct list_head list; char *id; }; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ struct list_head ids; /* _HID and _CIDs */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) char *acpi_device_hid(struct acpi_device *device); #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) /* Power Management */ struct acpi_device_power_flags { u32 explicit_get:1; /* _PSC present? */ u32 power_resources:1; /* Power resources */ u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 reserved:28; }; struct acpi_device_power_state { struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ u8 reserved:6; } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ struct acpi_handle_list resources; /* Power resources referenced */ }; struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ }; /* Performance Management */ struct acpi_device_perf_flags { u8 reserved:8; }; struct acpi_device_perf_state { struct { u8 valid:1; u8 reserved:7; } flags; u8 power; /* % Power (compared to P0) */ u8 performance; /* % Performance ( " ) */ int latency; /* Px->P0 time (microseconds) */ }; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ }; struct acpi_device_wakeup_state { u8 enabled:1; }; struct acpi_device_wakeup { acpi_handle gpe_device; acpi_integer gpe_number; acpi_integer sleep_state; struct acpi_handle_list resources; struct acpi_device_wakeup_state state; struct acpi_device_wakeup_flags flags; int prepare_count; }; /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_ops ops; struct acpi_driver *driver; void *driver_data; struct device dev; struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */ enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ }; static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; } #define to_acpi_device(d) container_of(d, struct acpi_device, dev) #define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; /* * Events * ------ */ struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); extern int register_acpi_bus_notifier(struct notifier_block *nb); extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); void acpi_bus_data_handler(acpi_handle handle, void *context); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_receive_event(struct acpi_bus_event *event); #else static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) { return 0; } #endif int acpi_bus_register_driver(struct acpi_driver *driver); void acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ 
struct acpi_bus_type { struct list_head list; struct bus_type *bus; /* For general devices under the bus */ int (*find_device) (struct device *, acpi_handle *); /* For bridges, such as PCI root bridge, IDE controller */ int (*find_bridge) (struct device *, acpi_handle *); }; int register_acpi_bus_type(struct acpi_bus_type *); int unregister_acpi_bus_type(struct acpi_bus_type *); struct device *acpi_get_physical_device(acpi_handle); struct acpi_pci_root { struct list_head node; struct acpi_device * device; struct acpi_pci_id id; struct pci_bus *bus; u16 segment; u8 bus_nr; u32 osc_support_set; /* _OSC state of support bits */ u32 osc_control_set; /* _OSC state of control bits */ u32 osc_control_qry; /* the latest _OSC query result */ u32 osc_queried:1; /* has _OSC control been queried? */ }; /* helper */ acpi_handle acpi_get_child(acpi_handle, acpi_integer); int acpi_is_root_bridge(acpi_handle); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) #ifdef CONFIG_PM_SLEEP int acpi_pm_device_sleep_state(struct device *, int *); int acpi_pm_device_sleep_wake(struct device *, bool); #else /* !CONFIG_PM_SLEEP */ static inline int acpi_pm_device_sleep_state(struct device *d, int *p) { if (p) *p = ACPI_STATE_D0; return ACPI_STATE_D3; } static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { return -ENODEV; } #endif /* !CONFIG_PM_SLEEP */ #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/actypes.h000066400000000000000000001033731314037446600235440ustar00rootroot00000000000000/****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /* * Copyright (C) 2000 - 2008, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of * 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /*! [Begin] no source code translation */ /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no common * 64-bit integer type across the various compilation models, as shown in * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and are * required because there is no C type that consistently represents the native * data width. ACPI_SIZE is needed because there is no guarantee that a * kernel-level C library is present. * * ACPI_SIZE 16/32/64-bit unsigned value * ACPI_NATIVE_INT 16/32/64-bit signed value * */ /******************************************************************************* * * Common types for all compilers, all targets * ******************************************************************************/ typedef unsigned char BOOLEAN; typedef unsigned char UINT8; typedef unsigned short UINT16; typedef COMPILER_DEPENDENT_UINT64 UINT64; typedef COMPILER_DEPENDENT_INT64 INT64; /*! 
[End] no source code translation !*/ /******************************************************************************* * * Types specific to 64-bit targets * ******************************************************************************/ #if ACPI_MACHINE_WIDTH == 64 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s64 acpi_native_int; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT64_MAX #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag * to indicate that special precautions must be taken to avoid alignment faults. * (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: Em64_t and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. */ #if defined (__IA64__) || defined (__ia64__) #define ACPI_MISALIGNMENT_NOT_SUPPORTED #endif /******************************************************************************* * * Types specific to 32-bit targets * ******************************************************************************/ #elif ACPI_MACHINE_WIDTH == 32 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s32 acpi_native_int; typedef u32 acpi_size; typedef u32 acpi_io_address; typedef u32 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the OS-specific header, and this will take precedence. * ******************************************************************************/ /* Value returned by acpi_os_get_thread_id */ #ifndef acpi_thread_id #define acpi_thread_id acpi_size #endif /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ #ifndef acpi_cpu_flags #define acpi_cpu_flags acpi_size #endif /* Object returned from acpi_os_create_cache */ #ifndef acpi_cache_t #ifdef ACPI_USE_LOCAL_CACHE #define acpi_cache_t struct acpi_memory_list #else #define acpi_cache_t void * #endif #endif /* * Synchronization objects - Mutexes, Semaphores, and spin_locks */ #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) /* * These macros are used if the host OS does not support a mutex object. * Map the OSL Mutex interfaces to binary semaphores. 
*/ #define acpi_mutex acpi_semaphore #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) #endif /* Configurable types for synchronization objects */ #ifndef acpi_spinlock #define acpi_spinlock void * #endif #ifndef acpi_semaphore #define acpi_semaphore void * #endif #ifndef acpi_mutex #define acpi_mutex void * #endif /******************************************************************************* * * Compiler-dependent types * * If the defaults below are not appropriate for the host compiler, they can * be defined in the compiler-specific header, and this will take precedence. * ******************************************************************************/ /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef acpi_uintptr_t #define acpi_uintptr_t void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA functions that are available to the rest of the kernel are * tagged with this macro which can be defined as appropriate for the host. */ #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(symbol) #endif /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) * *****************************************************************************/ /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ #define ACPI_MAX_GPE_BLOCKS 2 /* Default ACPI register widths */ #define ACPI_GPE_REGISTER_WIDTH 8 #define ACPI_PM1_REGISTER_WIDTH 16 #define ACPI_PM2_REGISTER_WIDTH 8 #define ACPI_PM_TIMER_WIDTH 32 /* Names within the namespace are 4 bytes long */ #define ACPI_NAME_SIZE 4 #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
/* Sizes for ACPI table headers */ #define ACPI_OEM_ID_SIZE 6 #define ACPI_OEM_TABLE_ID_SIZE 8 /* ACPI/PNP hardware IDs */ #define PCI_ROOT_HID_STRING "PNP0A03" #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Miscellaneous types */ typedef u32 acpi_status; /* All ACPI Exceptions */ typedef u32 acpi_name; /* 4-byte ACPI name */ typedef char *acpi_string; /* Null terminated ASCII string */ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ /* Owner IDs are used to track namespace nodes for selective deletion */ typedef u8 acpi_owner_id; #define ACPI_OWNER_ID_MAX 0xFF struct uint64_struct { u32 lo; u32 hi; }; union uint64_overlay { u64 full; struct uint64_struct part; }; struct uint32_struct { u32 lo; u32 hi; }; /* * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI * version 2, integers are 64 bits. Note that this pertains to the ACPI integer * type only, not other integers used in the implementation of the ACPI CA * subsystem. */ typedef unsigned long long acpi_integer; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #if ACPI_MACHINE_WIDTH == 64 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ #endif #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /******************************************************************************* * * Commonly used macros * ******************************************************************************/ /* Data manipulation */ #define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) #define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) #define ACPI_LOWORD(integer) ((u16) (u32)(integer)) #define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) #define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) #define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) /* Size calculation */ #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) /* Pointer manipulation */ #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) 
NULL) #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) #else #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) #endif /******************************************************************************* * * Miscellaneous constants * ******************************************************************************/ /* * Initialization sequence */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 #define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 #define ACPI_NO_DEVICE_INIT 0x20 #define ACPI_NO_OBJECT_INIT 0x40 /* * Initialization state */ #define ACPI_SUBSYSTEM_INITIALIZE 0x01 #define ACPI_INITIALIZED_OK 0x02 /* * Power state values */ #define ACPI_STATE_UNKNOWN (u8) 0xFF #define ACPI_STATE_S0 (u8) 0 #define ACPI_STATE_S1 (u8) 1 #define ACPI_STATE_S2 (u8) 2 #define ACPI_STATE_S3 (u8) 3 #define ACPI_STATE_S4 (u8) 4 #define ACPI_STATE_S5 (u8) 5 #define ACPI_S_STATES_MAX ACPI_STATE_S5 #define ACPI_S_STATE_COUNT 6 #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 #define ACPI_STATE_D3 (u8) 3 #define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_D_STATE_COUNT 4 #define ACPI_STATE_C0 (u8) 0 #define ACPI_STATE_C1 (u8) 1 #define ACPI_STATE_C2 (u8) 2 #define ACPI_STATE_C3 (u8) 3 #define ACPI_C_STATES_MAX ACPI_STATE_C3 #define ACPI_C_STATE_COUNT 4 /* * Sleep type invalid value */ #define ACPI_SLEEP_TYPE_MAX 0x7 #define ACPI_SLEEP_TYPE_INVALID 0xFF /* * Standard notify values */ #define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 #define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 #define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 #define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 #define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 #define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 #define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 #define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 #define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 #define ACPI_NOTIFY_RESERVED (u8) 0x0A #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_MAX 0x0B /* * Types associated with ACPI names and objects. The first group of * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition * of the ACPI object_type() operator (See the ACPI Spec). Therefore, * only add to the first group if the spec changes. * * NOTE: Types must be kept in sync with the global acpi_ns_properties * and acpi_ns_type_names arrays. 
*/ typedef u32 acpi_object_type; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 /* * These are object types that do not map directly to the ACPI * object_type() operator. They are used for various internal purposes only. * If new predefined ACPI_TYPEs are added (via the ACPI specification), these * internal types must move upwards. (There is code that depends on these * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ /* * These are special object types that never appear in * a Namespace node, only in a union acpi_operand_object */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef u32 acpi_event_type; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). * +-------------+-+-+-+ * | Bits 31:3 |2|1|0| * +-------------+-+-+-+ * | | | | * | | | +- Enabled? * | | +--- Enabled for wake? * | +----- Set? 
* +----------- */ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 /* * General Purpose Events (GPE) */ #define ACPI_GPE_INVALID 0xFF #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 /* * GPE info flags - Per GPE * +-+-+-+---+---+-+ * |7|6|5|4:3|2:1|0| * +-+-+-+---+---+-+ * | | | | | | * | | | | | +--- Interrupt type: Edge or Level Triggered * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime * | | | +--- Type of dispatch -- to method, handler, or none * | | +--- Enabled for runtime? * | +--- Enabled for wake? * +--- Unused */ #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 #define ACPI_GPE_TYPE_MASK (u8) 0x06 #define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 #define ACPI_GPE_TYPE_WAKE (u8) 0x02 #define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ #define ACPI_GPE_DISPATCH_MASK (u8) 0x18 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ #define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 #define ACPI_GPE_RUN_ENABLED (u8) 0x20 #define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 #define ACPI_GPE_WAKE_ENABLED (u8) 0x40 #define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ /* * Flags for GPE and Lock interfaces */ #define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ #define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_MAX_SYS_NOTIFY 0x7f /* Address Space (Operation Region) Types */ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 #define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 #define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* * bit_register IDs * * These values are intended to be used by the hardware interfaces * and are mapped to individual bitfields defined within the ACPI * registers. See the acpi_gbl_bit_register_info global table in utglobal.c * for this mapping. 
*/ /* PM1 Status register */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 /* PM1 Enable register */ #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D /* PM1 Control register */ #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 #define ACPI_BITREG_SLEEP_TYPE 0x11 #define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ #define ACPI_BITREG_ARB_DISABLE 0x13 #define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* Status register values. A 1 clears a status bit. 0 = no effect */ #define ACPI_CLEAR_STATUS 1 /* Enable and Control register values */ #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ struct { acpi_object_type type; /* ACPI_TYPE_INTEGER */ acpi_integer value; /* The actual number */ } integer; struct { acpi_object_type type; /* ACPI_TYPE_STRING */ u32 length; /* # of bytes in string, excluding trailing null */ char *pointer; /* points to the string value */ } string; struct { acpi_object_type type; /* ACPI_TYPE_BUFFER */ u32 length; /* # of bytes in buffer */ u8 *pointer; /* points to the buffer */ } buffer; struct { acpi_object_type type; /* ACPI_TYPE_PACKAGE */ u32 count; /* # of elements in package */ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ } package; struct { acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ acpi_object_type actual_type; /* Type associated with the Handle */ acpi_handle handle; /* object reference */ } reference; struct { acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; /* ACPI_TYPE_POWER */ u32 system_level; u32 resource_order; } power_resource; }; /* * List of objects, used as a parameter list for control method evaluation */ struct acpi_object_list { u32 count; union acpi_object *pointer; }; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) #define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) struct acpi_buffer { acpi_size length; /* Length in bytes of the buffer */ void *pointer; /* pointer to buffer */ }; /* * name_type for acpi_get_name */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_NAME_TYPE_MAX 1 /* * Predefined Namespace items */ struct acpi_predefined_names { char *name; u8 type; char *val; }; /* * Structure and flags for acpi_get_system_info */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by acpi_get_system_info() */ struct acpi_system_info { u32 acpi_ca_version; u32 flags; u32 timer_resolution; u32 reserved1; u32 reserved2; u32 debug_level; 
u32 debug_layer; }; /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_NUM_TABLE_EVENTS 2 /* * Types specific to the OS service interfaces */ typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); /* * Various handlers and callback procedures */ typedef u32(*acpi_event_handler) (void *context); typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); #define ACPI_INIT_DEVICE_INI 1 typedef acpi_status(*acpi_exception_handler) (acpi_status aml_status, acpi_name name, u16 opcode, u32 aml_offset, void *context); /* Table Event handler (Load, load_table, etc.) and types */ typedef acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); /* Address Spaces (For Operation Regions) */ typedef acpi_status(*acpi_adr_space_handler) (u32 function, acpi_physical_address address, u32 bit_width, acpi_integer * value, void *handler_context, void *region_context); #define ACPI_DEFAULT_HANDLER NULL typedef acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, u32 function, void *handler_context, void **region_context); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ /* Length of UUID (string) values */ #define ACPI_UUID_LENGTH 16 /* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { u32 length; /* Length of string + null */ char *string; }; struct acpica_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ struct acpica_device_id ids[1]; /* ID array */ }; /* * Structure returned from acpi_get_object_info. 
* Optimized for both 32- and 64-bit builds */ struct acpi_device_info { u32 info_size; /* Size of info, including ID strings */ u32 name; /* ACPI object Name */ acpi_object_type type; /* ACPI object Type */ u8 param_count; /* If a method, required parameter count */ u8 valid; /* Indicates which optional fields are valid */ u8 flags; /* Miscellaneous info */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ u32 current_status; /* _STA value */ acpi_integer address; /* _ADR value */ struct acpica_device_id hardware_id; /* _HID value */ struct acpica_device_id unique_id; /* _UID value */ struct acpica_device_id_list compatible_id_list; /* _CID list */ }; /* Values for Flags field above (acpi_get_object_info) */ #define ACPI_PCI_ROOT_BRIDGE 0x01 /* Flags for Valid field above (acpi_get_object_info) */ #define ACPI_VALID_STA 0x01 #define ACPI_VALID_ADR 0x02 #define ACPI_VALID_HID 0x04 #define ACPI_VALID_UID 0x08 #define ACPI_VALID_CID 0x10 #define ACPI_VALID_SXDS 0x20 #define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 /* Context structs for address space handlers */ struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; acpi_physical_address mapped_physical_address; u8 *mapped_logical_address; acpi_size mapped_length; }; /* * struct acpi_memory_list is used only if the ACPICA local cache is enabled */ struct acpi_memory_list { char *list_name; void *list_head; u16 object_size; u16 max_depth; u16 current_depth; u16 link_offset; #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Statistics for debug memory tracking only */ u32 total_allocated; u32 total_freed; u32 max_occupied; u32 total_size; u32 current_total_size; u32 requests; u32 hits; #endif }; #endif /* __ACTYPES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/netfront.c000066400000000000000000001644771314037446600237420ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_bus.h" #include "actypes.h" struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN_no static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
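 * (On kernels that predate NETIF_F_GSO there is no NETIF_F_GSO_ROBUST
 * bit to preserve, so clearing NETIF_F_TSO is sufficient here.)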
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *,int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ static int rtl8139_acpi_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { printk(KERN_INFO "acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) printk(KERN_INFO "cannot remove from acpi list\n"); return retval; } static void remove_rtl8139(void) { struct pci_dev *pdev = NULL; acpi_handle phandle = NULL; pdev = pci_get_device(0x10ec, 0x8139, pdev); if (pdev) { phandle = DEVICE_ACPI_HANDLE(&pdev->dev); printk(KERN_INFO "remove_rtl8139 : rtl8139 of subsystem(0x%2x,0x%2x) removed.\n", pdev->subsystem_vendor, pdev->subsystem_device); pci_disable_device(pdev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pdev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pdev); pci_remove_bus_device(pdev); #else pci_stop_and_remove_bus_device(pdev); #endif /* end check kernel version */ pci_dev_put(pdev); } if (phandle) { rtl8139_acpi_bus_trim(phandle); } return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
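 *
 * For reference, once the device connects, talk_to_backend() below
 * advertises the frontend's setup to the backend through xenstore,
 * roughly as follows (the device path and the values shown are
 * illustrative, not taken from a real guest):
 *
 *	device/vif/0/tx-ring-ref             = "768"
 *	device/vif/0/rx-ring-ref             = "769"
 *	device/vif/0/event-channel           = "9"
 *	device/vif/0/request-rx-copy         = "1"
 *	device/vif/0/feature-rx-notify       = "1"
 *	device/vif/0/feature-no-csum-offload = "0"
 *	device/vif/0/feature-sg              = "1"
 *	device/vif/0/feature-gso-tcpv4       = "1"
 *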
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove rtl8139 when xen nic devices appear */ remove_rtl8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal_uvp(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read_uvp(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal_uvp(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
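 * setup_device() below allocates one page for the TX ring and one for
 * the RX ring, grants the backend access to both pages, and binds an
 * interdomain event channel; its port is what talk_to_backend()
 * advertises as "event-channel".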
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf_uvp(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end_uvp(xbt, 1); xenbus_dev_fatal_uvp(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal_uvp(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal_uvp(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
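 *
 * Summary of how the handler below reacts: when the backend reaches
 * InitWait the frontend connects (rings, event channel, xenstore keys)
 * and switches itself to Connected; on Connected it sends gratuitous
 * ARPs so switches re-learn the MAC; on Closing it completes the
 * frontend side of the shutdown handshake. All other states are ignored.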
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate_uvp(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state_uvp(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed_uvp(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
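 * The backend first fills in the response slots and only then advances
 * rsp_prod (with a write barrier in between), so this read barrier
 * guarantees that once the new rsp_prod value is visible, the responses
 * themselves are visible too.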
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access_uvp( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_uvp_ref_uvp(np->grant_tx_ref[id]); gnttab_release_grant_reference_uvp( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
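 * For example, with the default rx_target of 64 (RX_DFL_MIN_TARGET),
 * fewer than 16 outstanding requests doubles the target to 128; the
 * target is capped at rx_max_target (at most 256, see RX_MAX_TARGET).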
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference_uvp(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); if(!vaddr) { printk("vaddr is null\n"); } req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_uvp_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance_uvp_uvp(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
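 * In the flip case the multicall/memory_op hypercall above provides that
 * ordering; in the copy case the explicit wmb() does. Only after that is
 * req_prod published, and RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() decides
 * whether the backend actually needs an event-channel kick.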
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq_uvp(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_uvp_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN_no if (skb->proto_data_valid) /* remote but checksummed? 
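 * NETTXF_csum_blank tells the backend the checksum field has not been
 * filled in yet, while NETTXF_data_validated tells it the payload has
 * already been verified, so neither end needs to recompute it.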
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq_uvp(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, 
size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_uvp_ref_uvp(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); if(!vaddr) { printk("vaddr is null\n"); } mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_uvp_ref_uvp(ref); BUG_ON(!ret); } gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
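 * SKB_GSO_DODGY marks the GSO metadata as coming from an untrusted
 * source (the backend), so the stack re-validates the headers and
 * recomputes gso_segs (cleared below) before segmenting.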
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. 
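 * A packet with csum_blank set was generated locally on this host and
 * never crossed a physical wire, so its data cannot have been corrupted
 * in transit; treating it as CHECKSUM_UNNECESSARY is therefore safe even
 * though the backend did not explicitly assert data_validated.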
*/ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN_no skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance_uvp_uvp(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_uvp_ref_uvp(np->grant_tx_ref[i]); gnttab_release_grant_reference_uvp( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_uvp_ref_uvp(ref); gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); np->grant_rx_ref[id] 
= GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page_uvp_uvp(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); if(!vaddr) { printk("vaddr is null\n"); } MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance_uvp_uvp(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_uvp_ref_uvp(ref)) { busy++; continue; } gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_uvp_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_uvp_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
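 * The carrier must be raised before the buf_gc/alloc calls below, which
 * insist on a connected device, and the "kick" is the
 * notify_remote_via_irq_uvp() that follows; without it the backend might
 * not look at the requeued RX requests until some unrelated event
 * arrives.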
*/ netfront_carrier_on(np); notify_remote_via_irq_uvp(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_reference_uvps(np->gref_tx_head); gnttab_free_grant_reference_uvps(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) 
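/*
 * Illustrative note (not part of the original driver): the rxbuf_min,
 * rxbuf_max and rxbuf_cur attributes created above typically appear
 * under the net device's sysfs directory, so an administrator can do,
 * for example:
 *
 *	echo 128 > /sys/class/net/eth0/rxbuf_min
 *	cat /sys/class/net/eth0/rxbuf_cur
 *
 * (interface name and path are illustrative).
 */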
device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references_uvp(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references_uvp(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_reference_uvps(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
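 * Teardown order below: take both ring locks and drop the carrier so the
 * TX/RX paths stop treating the device as connected, then unbind the
 * event-channel irq handler, and finally revoke the grant references on
 * the two shared rings.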
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler_uvp(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access_uvp(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN_no if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver_uvp(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA12/xen-vnif/netfront.h000066400000000000000000000210251314037446600237240ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
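 * (Illustratively, this is the same string an acceleration plugin passes
 * as the "frontend" argument of netfront_accelerator_loaded_uvp() when it
 * registers its hooks.)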
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded_uvp(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded_uvp() */ extern void netfront_accelerator_stop_uvp(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
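 * (In this driver the check is expected to go through the plugin's
 * check_ready() hook described in struct netfront_accel_hooks above.)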
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/000077500000000000000000000000001314037446600201105ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/Makefile000066400000000000000000000017331314037446600215540ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Modify : songjiangtao 00202546 ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) COMPILEFLAGS=$(CROSSCOMPILE) obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all: make -C $(KERNDIR) M=$(M) modules $(COMPILEFLAGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEFLAGS) clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEFLAGS) rm -rf Modules.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/config.mk000066400000000000000000000011641314037446600217100ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -DHAVE_XEN_PLATFORM_COMPAT_H #_XEN_CPPFLAGS += -include $(objtree)/include/linux/autoconf.h EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/000077500000000000000000000000001314037446600215335ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/000077500000000000000000000000001314037446600224505ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/hypercall.h000066400000000000000000000001211314037446600245760ustar00rootroot00000000000000#ifdef __i386__ #include "hypercall_32.h" #else #include "hypercall_64.h" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/hypercall_32.h000066400000000000000000000245711314037446600251210ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
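 * (32-bit flavour: the _hypercallN macros below pass arguments 1-5 in
 * %ebx, %ecx, %edx, %esi and %edi and return the result in %eax, matching
 * the convention documented in xen-x86_32.h.)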
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ __res; \ }) static inline int __must_check 
HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = 
_hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } static inline int __must_check HYPERVISOR_tmem_op( struct tmem_op *op) { return _hypercall1(int, tmem_op, op); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/hypercall_64.h000066400000000000000000000250321314037446600251170ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
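 * (64-bit flavour: the _hypercallN macros below pass arguments 1-5 in
 * %rdi, %rsi, %rdx, %r10 and %r8 and return the result in %rax, matching
 * the convention documented in xen-x86_64.h.)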
* * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "add hypercall_stubs(%%rip),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ register long __arg5 asm("r8") = (long)(a5); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ : 
"1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_mca( struct xen_mc *mc_op) { mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; return _hypercall1(int, mca, mc_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, 
sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } static inline int __must_check HYPERVISOR_tmem_op( struct tmem_op *op) { return _hypercall1(int, tmem_op, op); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/xen-mca.h000066400000000000000000000342111314037446600241520ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. 
(DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. * Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage Senario: After offlining broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easy broken page earlier when doing next reboot. 
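 * The structures below carry that result: page_offline_action reports the
 * machine frame number (mfn) and status of an offlined page, while
 * cpu_offline_action identifies an offlined CPU by socket, core and thread
 * id.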
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t _pad0; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. 
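 *
 * Illustrative sketch (not code from this driver) of a telemetry fetch
 * using the HYPERVISOR_mca() wrapper declared in hypercall_64.h, which
 * fills in interface_version itself; structure and flag names are the ones
 * defined below, and error handling is elided:
 *
 *   struct xen_mc mc = { .cmd = XEN_MC_fetch };
 *   mc.u.mc_fetch.flags = XEN_MC_URGENT;
 *   set_xen_guest_handle(mc.u.mc_fetch.data, mi);   (mi: a struct mc_info buffer)
 *   if (HYPERVISOR_mca(&mc) == 0 &&
 *       !(mc.u.mc_fetch.flags & (XEN_MC_FETCHFAILED | XEN_MC_NODATA)))
 *           walk mi with x86_mcinfo_first()/x86_mcinfo_next();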
*/ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/xen-x86_32.h000066400000000000000000000145401314037446600243460ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
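 * The 64-bit handle variant defined below unions the 32-bit pointer with
 * an 8-byte-aligned uint64_t, and set_xen_guest_handle() zeroes all eight
 * bytes before storing the pointer, so handles built by a 32-bit guest or
 * toolstack have the size, alignment and padding the 64-bit hypervisor
 * expects.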
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/xen-x86_64.h000066400000000000000000000157351314037446600243620ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. 
* All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/arch/xen.h000066400000000000000000000170051314037446600234160ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #include "../public/xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include #elif defined(__x86_64__) #include #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/balloon/000077500000000000000000000000001314037446600231615ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/balloon/balloon.h000066400000000000000000000046071314037446600247670ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_BALLOON_H__ #define __ASM_BALLOON_H__ /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif /* __ASM_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/interface/000077500000000000000000000000001314037446600234735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/interface/features.h000066400000000000000000000055731314037446600254740ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __INTERFACE_FEATURES_H__ #define __INTERFACE_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/interface/grant_table.h000066400000000000000000000407171314037446600261370ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry grant_entry_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. 
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. 
*/ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. 
*/ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. 
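 *
 * (Illustrative note added for this document, not part of the original
 * header: callers conventionally treat GNTST_okay as success and any
 * negative value as failure.  For example, after a GNTTABOP_map_grant_ref
 * batch, a hypothetical check might read
 *
 *     if (op.status != GNTST_okay)
 *         return op.status;
 *
 * where "op" is the struct gnttab_map_grant_ref handed to the hypercall.)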
*/ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/interface/xencomm.h000066400000000000000000000032131314037446600253110ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/000077500000000000000000000000001314037446600230115ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/common/000077500000000000000000000000001314037446600243015ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/common/xenbus.h000066400000000000000000000037411314037446600257630ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Interface to /proc/xen/xenbus. 
* * Copyright (c) 2008, Diego Ongaro * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_XENBUS_H__ #define __LINUX_PUBLIC_XENBUS_H__ #include #ifndef __user #define __user #endif typedef struct xenbus_alloc { domid_t dom; __u32 port; __u32 grant_ref; } xenbus_alloc_t; /* * @cmd: IOCTL_XENBUS_ALLOC * @arg: &xenbus_alloc_t * Return: 0, or -1 for error */ #define IOCTL_XENBUS_ALLOC \ _IOC(_IOC_NONE, 'X', 0, sizeof(xenbus_alloc_t)) #endif /* __LINUX_PUBLIC_XENBUS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/event_channel.h000066400000000000000000000213471314037446600260020ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include "xen.h" /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). 
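 *
 * Illustrative sketch (added for this document, not part of the original
 * interface comment): each sub-command takes a pointer to its own argument
 * structure.  Assuming the usual HYPERVISOR_event_channel_op() wrapper, a
 * guest notifies the remote end of a bound port roughly as follows:
 *
 *     struct evtchn_send send = { .port = local_port };
 *     (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 *
 * The same pattern (fill in the op-specific struct, pass its address as
 * @args) applies to every EVTCHNOP_* sub-command defined below.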
*/ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. 
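 *
 * Illustrative sketch (added for this document): a guest querying one of
 * its own ports might do the following, again assuming the
 * HYPERVISOR_event_channel_op() wrapper; rc, port and remote_dom are
 * caller-side placeholders:
 *
 *     struct evtchn_status st = { .dom = DOMID_SELF, .port = port };
 *     rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &st);
 *     if (rc == 0 && st.status == EVTCHNSTAT_interdomain)
 *         remote_dom = st.u.interdomain.dom;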
*/ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/evtchn.h000066400000000000000000000112251314037446600244520ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. 
* * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include /* * pv drivers header files */ #include "hypervisor.h" #include "synch_bitops.h" #include "event_channel.h" /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. */ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/features.h000066400000000000000000000007561314037446600250100ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __FEATURES_H__ #define __FEATURES_H__ #include #include extern void setup_xen_features(void); //void setup_xen_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; #define xen_feature(flag) (xen_features[flag]) #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/gnttab.h000066400000000000000000000125041314037446600244430ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include /* * pv driver header files */ #include #include #include #include /* maddr_t */ struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); int gnttab_suspend(void); int gnttab_resume(void); void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ 
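/*
 * Illustrative usage sketch, added for this document and not part of the
 * original gnttab.h above: how a PV frontend might use the declarations
 * just listed to share a single page with its backend.  The names
 * example_share_page_with_backend, backend_domid and shared_mfn are
 * placeholders (the real frame number would normally come from something
 * like virt_to_mfn()), and error handling is kept minimal.
 */
static inline int example_share_page_with_backend(domid_t backend_domid,
                                                  unsigned long shared_mfn,
                                                  grant_ref_t *ref_out)
{
    int ref;

    /* Grant the backend read/write access to the frame (flags == 0). */
    ref = gnttab_grant_foreign_access(backend_domid, shared_mfn, 0);
    if (ref < 0)
        return ref;     /* no free grant entries available */

    /* The reference is typically advertised to the backend via xenstore. */
    *ref_out = ref;
    return 0;
}

/*
 * Tear-down is the mirror image: once the backend has unmapped the page,
 * the frontend revokes the grant.  Passing 0 as the page argument revokes
 * access without freeing any page:
 *
 *     gnttab_end_foreign_access(ref, 0);
 */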
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/grant_table.h000066400000000000000000000101301314037446600254370ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ //int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
*/ //void gnttab_end_foreign_access(grant_ref_t ref, int readonly, // unsigned long page); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/hvm.h000066400000000000000000000006751314037446600237640ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/hvm_op.h000066400000000000000000000112741314037446600244570ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ #include "xen.h" /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. 
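 *
 * Illustrative sketch (added for this document, not part of the original
 * comment): setting a parameter mirrors the hvm_get_parameter() helper
 * shown in hvm.h above.  Assuming the usual HYPERVISOR_hvm_op() wrapper,
 * with idx, val and rc as caller-side placeholders:
 *
 *     struct xen_hvm_param xhv = {
 *         .domid = DOMID_SELF,
 *         .index = idx,
 *         .value = val,
 *     };
 *     rc = HYPERVISOR_hvm_op(HVMOP_set_param, &xhv);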
*/ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. 
*/ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/hypercall.h000066400000000000000000000013761314037446600251540ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/hypervisor.h000066400000000000000000000161051314037446600253770ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
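 *
 * Illustrative usage sketch, referring back to the HYPERVISOR_multicall_check()
 * helper in hypercall.h above (the MULTI_* helpers are declared later in this
 * header; GNTTABOP_map_grant_ref, the operands and handle_partial_failure()
 * are assumptions): callers batch hypercalls and then verify that every
 * sub-operation succeeded, e.g.
 *
 *     multicall_entry_t mcl[2];
 *     MULTI_update_va_mapping(&mcl[0], vaddr, new_pte, 0);
 *     MULTI_grant_table_op(&mcl[1], GNTTABOP_map_grant_ref, &map_op, 1);
 *     if (HYPERVISOR_multicall_check(mcl, 2, NULL))
 *         handle_partial_failure();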
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #include #if defined(__i386__) #ifdef CONFIG_X86_PAE #include "pgtable-nopud.h" #else #include "pgtable-nopmd.h" #endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #include "pgtable-nopud.h" #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #else #define xen_tlb_flush_all xen_tlb_flush #define xen_invlpg_all xen_invlpg #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. 
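 *
 * Illustrative usage sketch: the HYPERVISOR_poll() wrapper further down in
 * this header performs this conversion internally via jiffies_to_st(), so a
 * caller blocking on an already-bound event channel for up to one second
 * simply passes a jiffies value (info->evtchn is an assumed field name):
 *
 *     evtchn_port_t port = info->evtchn;
 *     int rc = HYPERVISOR_poll(&port, 1, jiffies + HZ);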
*/ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/io/000077500000000000000000000000001314037446600234205ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/io/ring.h000066400000000000000000000351261314037446600245370ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). 
To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t netfront_smartpoll_active; \ uint8_t pad[47]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. 
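 *
 * Illustrative usage sketch, reusing the example request_t type and the
 * back_ring variable from the DEFINE_RING_TYPES() description above
 * (process_request() is an assumed handler, error handling omitted): a back
 * end commonly drains its request ring like this:
 *
 *     RING_IDX rc = back_ring.req_cons;
 *     RING_IDX rp = back_ring.sring->req_prod;
 *     xen_rmb();                      // see req_prod before the requests
 *     while (rc != rp && !RING_REQUEST_CONS_OVERFLOW(&back_ring, rc)) {
 *         request_t *req = RING_GET_REQUEST(&back_ring, rc);
 *         process_request(req);
 *         rc++;
 *     }
 *     back_ring.req_cons = rc;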
*/ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. 
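 *
 * Illustrative usage sketch, again with the example types from above: a front
 * end typically queues one request and notifies only when the back end asked
 * for it (evtchn is an assumed, already-bound event channel, fill_in_request()
 * an assumed helper, and notify_remote_via_evtchn() is provided by the
 * event-channel support code elsewhere in this package):
 *
 *     int notify;
 *     request_t *req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *     fill_in_request(req);
 *     front_ring.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote_via_evtchn(evtchn);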
*/ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/io/xenbus.h000066400000000000000000000046401314037446600251010ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. 
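 *
 * Illustrative usage sketch: a frontend driver usually walks these states from
 * its otherend-changed callback, roughly as follows (setup_rings_and_evtchn()
 * is an assumed helper; xenbus_switch_state() is assumed to be provided by the
 * xenbus client code in this package):
 *
 *     static void backend_changed(struct xenbus_device *dev,
 *                                 enum xenbus_state backend_state)
 *     {
 *         switch (backend_state) {
 *         case XenbusStateInitWait:
 *             setup_rings_and_evtchn(dev);
 *             xenbus_switch_state(dev, XenbusStateInitialised);
 *             break;
 *         case XenbusStateConnected:
 *             xenbus_switch_state(dev, XenbusStateConnected);
 *             break;
 *         case XenbusStateClosing:
 *             xenbus_switch_state(dev, XenbusStateClosed);
 *             break;
 *         default:
 *             break;
 *         }
 *     }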
*/ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/ioreq.h000066400000000000000000000105071314037446600243040ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t pad:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. */ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. 
If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/maddr.h000066400000000000000000000114501314037446600242520ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. 
These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/memory.h000066400000000000000000000233031314037446600244730ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
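 *
 * Illustrative usage sketch, referring back to the maddr.h conversion helpers
 * above: a frontend that wants to share a page with its backend usually
 * converts the page's virtual address to a frame number first
 * (gnttab_grant_foreign_access() is declared in gnttab.h earlier in this
 * package; otherend_id is an assumption):
 *
 *     void *ring_page = (void *)__get_free_page(GFP_KERNEL);
 *     int ref = gnttab_grant_foreign_access(otherend_id,
 *                                           virt_to_mfn(ring_page), 0);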
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include "xen.h" /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. */ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. */ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. 
* 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. 
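 *
 * Illustrative usage sketch: a guest kernel fetches this map roughly as
 * follows (E820MAX and struct e820entry are assumed from the architecture
 * code, error handling omitted):
 *
 *     struct e820entry map[E820MAX];
 *     struct xen_memory_map memmap = { .nr_entries = E820MAX };
 *     set_xen_guest_handle(memmap.buffer, map);
 *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
 *     // on success, memmap.nr_entries holds the number of entries written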
*/ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/nmi.h000066400000000000000000000053061314037446600237510ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ #include "xen.h" /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. 
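 *
 * Illustrative usage sketch: dom0's vcpu0 would register its NMI handler
 * roughly like this (nmi_entry is an assumed assembly entry point, and the
 * HYPERVISOR_nmi_op() wrapper is assumed to be provided by the hypercall
 * headers in this package):
 *
 *     struct xennmi_callback cb = {
 *         .handler_address = (unsigned long)nmi_entry,
 *     };
 *     rc = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);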
*/ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/params.h000066400000000000000000000076431314037446600244570ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. 
Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/pgtable-nopmd.h000066400000000000000000000005611314037446600257150ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/pgtable-nopud.h000066400000000000000000000006211314037446600257220ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/physdev.h000066400000000000000000000171151314037446600246510ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ #include "xen.h" /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? 
(physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. 
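 *
 * Illustrative usage sketch: dom0 typically allocates a vector for a physical
 * IRQ using the physdev_irq structure defined just below (irq is an
 * assumption, error handling omitted):
 *
 *     struct physdev_irq irq_op = { .irq = irq };
 *     rc = HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op);
 *     vector = irq_op.vector;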
*/ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/platform-compat.h000066400000000000000000000127371314037446600263010ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include /* * pv drivers header files */ #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif //#if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ //#if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) //#if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) //typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); //#endif //#endif //#endif //#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) //#define setup_xen_features xen_setup_features //#endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/platform.h000066400000000000000000000301271314037446600250110ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. 
* On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. 
*/ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. */ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... 
*/ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ } u; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/protocols.h000066400000000000000000000032101314037446600252020ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated 
documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/sched.h000066400000000000000000000104321314037446600242500ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. 
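 *
 * e.g. (illustrative; HYPERVISOR_sched_op() is the usual guest-side
 * hypercall wrapper, declared elsewhere rather than in this header):
 *
 *     (void)HYPERVISOR_sched_op(SCHEDOP_yield, NULL);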
*/ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/synch_bitops.h000066400000000000000000000062341314037446600256730ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
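 *
 * Typical use (illustrative sketch; 'shared' stands for a mapping of the
 * shared_info structure declared in xen.h, not something defined here):
 * masking an event channel and consuming its pending bit,
 *
 *     synch_set_bit(port, &shared->evtchn_mask[0]);
 *     pending = synch_test_and_clear_bit(port, &shared->evtchn_pending[0]);
 *
 * The LOCK-prefixed instructions below make such updates safe against
 * concurrent access by other (virtual) CPUs and by Xen itself.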
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/tmem.h000066400000000000000000000067431314037446600241360ustar00rootroot00000000000000/****************************************************************************** * tmem.h * * Guest OS interface to Xen Transcendent Memory. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_TMEM_H__ #define __XEN_PUBLIC_TMEM_H__ #include "xen.h" /* Commands to HYPERVISOR_tmem_op() */ #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_NEW_PAGE 3 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 /* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ #define TMEMC_THAW 0 #define TMEMC_FREEZE 1 #define TMEMC_FLUSH 2 #define TMEMC_DESTROY 3 #define TMEMC_LIST 4 #define TMEMC_SET_WEIGHT 5 #define TMEMC_SET_CAP 6 #define TMEMC_SET_COMPRESS 7 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_POOL_PAGESIZE_MASK 0xf #define TMEM_POOL_VERSION_SHIFT 24 #define TMEM_POOL_VERSION_MASK 0xff /* Special errno values */ #define EFROZEN 1000 #define EEMPTY 1001 #ifndef __ASSEMBLY__ typedef xen_pfn_t tmem_cli_mfn_t; typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; struct tmem_op { uint32_t cmd; int32_t pool_id; /* private > 0; shared < 0; 0 is invalid */ union { struct { /* for cmd == TMEM_NEW_POOL */ uint64_t uuid[2]; uint32_t flags; } new; struct { /* for cmd == TMEM_CONTROL */ uint32_t subop; uint32_t cli_id; uint32_t arg1; uint32_t arg2; tmem_cli_va_t buf; } ctrl; struct { uint64_t object; uint32_t index; uint32_t tmem_offset; uint32_t pfn_offset; uint32_t len; tmem_cli_mfn_t cmfn; /* client machine page frame */ } gen; } u; }; typedef struct tmem_op tmem_op_t; DEFINE_XEN_GUEST_HANDLE(tmem_op_t); #endif #endif /* __XEN_PUBLIC_TMEM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/version.h000066400000000000000000000060571314037446600246570ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. 
All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #define XENVER_commandline 9 typedef char xen_commandline_t[1024]; #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/xen-compat.h000066400000000000000000000036361314037446600252450ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. 
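 *
 * A guest that wants a particular ABI therefore defines the version before
 * pulling in these headers, e.g. (illustrative):
 *
 *     #define __XEN_INTERFACE_VERSION__ 0x00030205
 *     #include "xen.h"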
*/ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/xen.h000066400000000000000000000624451314037446600237670ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include #if defined(__i386__) || defined(__x86_64__) #include #elif defined(__ia64__) #include #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. 
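 *
 * A guest handle wraps a guest-virtual pointer passed to a hypercall, and
 * the argument structures in these headers use it in place of raw
 * pointers. Callers typically fill one in with set_xen_guest_handle(),
 * e.g. (illustrative; 'op' is a struct xen_platform_op and 'buf' a
 * caller-provided buffer):
 *
 *     set_xen_guest_handle(op.u.microcode.data, buf);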
*/ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 #define __HYPERVISOR_tmem_op 38 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. 
(DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. 
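 *
 * Example (illustrative; HYPERVISOR_mmuext_op() is the guest-side wrapper,
 * declared elsewhere rather than in this header): flushing the local TLB,
 *
 *     struct mmuext_op op = { .cmd = MMUEXT_TLB_FLUSH_LOCAL };
 *     (void)HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);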
*/ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. 
The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. 
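 *
 * The vcpu_info entries in this structure each embed a vcpu_time_info,
 * which is read with the version-based retry loop described above. An
 * illustrative sketch, where 't' points at the relevant vcpu_time_info,
 * rdtsc() and rmb() stand for the platform's TSC read and read barrier,
 * tsc_shift is assumed non-negative, and the intermediate product is
 * assumed wide enough (real implementations use a widening multiply):
 *
 *     do {
 *         version = t->version;
 *         rmb();
 *         delta = rdtsc() - t->tsc_timestamp;
 *         stime = t->system_time +
 *                 (((delta << t->tsc_shift) * t->tsc_to_system_mul) >> 32);
 *         rmb();
 *     } while ((version & 1) || (version != t->version));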
* * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. 
There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. 
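 *
 * e.g. mk_unsigned_long(0x7FF0) expands to 0x7FF0UL when compiled as C but
 * stays a bare 0x7FF0 when this header is pulled into assembly sources
 * (see the __ASSEMBLY__ branch below).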
*/ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/xen_proc.h000066400000000000000000000004021314037446600247730ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/xenbus.h000066400000000000000000000255601314037446600244760ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); /* See XBWF_ definitions below. */ unsigned long flags; }; /* * Execute callback in its own kthread. 
Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int xenbus_register_frontend(struct xenbus_driver *drv); int xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. 
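 *
 * (The accessors declared above are normally used inside a transaction
 * retry loop; an illustrative sketch, where 'dev' is a xenbus_device and
 * 'ref' a previously obtained grant reference, with error handling
 * omitted:
 *
 *     struct xenbus_transaction xbt;
 * again:
 *     xenbus_transaction_start(&xbt);
 *     xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ref);
 *     if (xenbus_transaction_end(xbt, 0) == -EAGAIN)
 *         goto again;
 *
 * Real drivers also check each of these return values.)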
*/ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). 
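 *
 * For illustration (not part of the original header), a backend that mapped
 * a frontend grant with xenbus_map_ring_valloc() tears the mapping down
 * roughly like this; "area" and "ref" are placeholder names:
 *
 *	struct vm_struct *area = xenbus_map_ring_valloc(dev, ref);
 *	if (!IS_ERR(area)) {
 *		... use the shared page at area->addr ...
 *		xenbus_unmap_ring_vfree(dev, area);
 *	}
 *
 * (The exact error convention of xenbus_map_ring_valloc() should be checked
 * against the implementation; IS_ERR() is assumed here.)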
*/ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/public/xs_wire.h000066400000000000000000000070141314037446600246440ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. 
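 * The xsd_errors[] table defined below is the canonical mapping back from
 * such a string to an errno value.  A user of this header might resolve an
 * error string roughly as follows (illustrative sketch only; the function
 * name is made up and strcmp() is assumed to be available):
 *
 *	static int example_xs_error_to_errno(const char *errstring)
 *	{
 *		unsigned int i;
 *		for (i = 0; i < sizeof(xsd_errors) / sizeof(xsd_errors[0]); i++)
 *			if (strcmp(errstring, xsd_errors[i].errstring) == 0)
 *				return xsd_errors[i].errnum;
 *		return EINVAL;
 *	}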
*/ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vbd/000077500000000000000000000000001314037446600223065ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vbd/blkif.h000066400000000000000000000134061314037446600235520ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include #include /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). 
* * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* * NB. first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "sector-size" node in the backend * xenbus info. Also the xenbus "sectors" node is expressed in 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. 
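 * DEFINE_RING_TYPES(blkif, ...) below expands (via the shared ring macros in
 * io/ring.h) into blkif_sring_t plus blkif_front_ring_t/blkif_back_ring_t and
 * their accessor macros.  A frontend typically drives the ring roughly like
 * this (illustrative sketch only; "req", "notify" and "irq" are placeholders,
 * and notify_remote_via_irq() comes from the event-channel code, not this
 * file):
 *
 *	blkif_sring_t *sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	blkif_front_ring_t front;
 *	blkif_request_t *req;
 *	int notify;
 *
 *	SHARED_RING_INIT(sring);
 *	FRONT_RING_INIT(&front, sring, PAGE_SIZE);
 *
 *	req = RING_GET_REQUEST(&front, front.req_prod_pvt);
 *	... fill in req->operation, req->handle, req->id, req->sector_number, req->seg[] ...
 *	front.req_prod_pvt++;
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *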
*/ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vnif/000077500000000000000000000000001314037446600224755ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vnif/netif.h000066400000000000000000000163561314037446600237660ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include #include /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. 
*/ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
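 * For reference, a backend normally completes each transmit request by
 * posting one of the NETIF_RSP_* values above into a netif_tx_response slot,
 * roughly as sketched here (illustrative only; "netbk", "txreq", "notify"
 * and "irq" are placeholder names):
 *
 *	struct netif_tx_response *resp;
 *	resp = RING_GET_RESPONSE(&netbk->tx, netbk->tx.rsp_prod_pvt);
 *	resp->id = txreq->id;
 *	resp->status = NETIF_RSP_OKAY;	(or NETIF_RSP_ERROR, NETIF_RSP_DROPPED)
 *	netbk->tx.rsp_prod_pvt++;
 *	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netbk->tx, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 *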
*/ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/000077500000000000000000000000001314037446600224745ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/driver_util.h000066400000000000000000000005421314037446600251760ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(size_t size); extern void free_vm_area(struct vm_struct *area); extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/gnttab_dma.h000066400000000000000000000026471314037446600247560ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/platform-pci.h000066400000000000000000000026601314037446600252460ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /*the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/xenbus_comms.h000066400000000000000000000043341314037446600253530ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/include/vpci/xenbus_probe.h000066400000000000000000000061171314037446600253450ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; struct device dev; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void dev_changed(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/version.ini000066400000000000000000000000641314037446600222760ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/000077500000000000000000000000001314037446600223265ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/Kbuild000066400000000000000000000001511314037446600234600ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o xen-balloon-objs := balloon.o xen-balloon-objs += sysfs.o 
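# Note: the module is built out of tree against an installed kernel build
# directory, in the same way the xen-platform-pci Kbuild does, e.g.
# (illustrative invocation; KERNELDIR is whatever kernel tree you target):
#   make -C $(KERNELDIR) M=$(PWD) modules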
UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/Makefile000066400000000000000000000000661314037446600237700ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/balloon.c000066400000000000000000000462211314037446600241250ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /*added by linyuliang 00176349 begin*/ #include /*added by linyuliang 00176349 end*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv driver header file */ #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #ifndef MODULE extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif /*******************/ #ifndef CONFIG_XEN static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /*******************/ /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. 
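 * balloon_set_new_target() below only records the new target (clamped by
 * minimum_target()) in bs.target_pages and schedules this worker;
 * balloon_process() then calls increase_reservation()/decrease_reservation()
 * in frame_list-sized batches until current_target() is met, or backs off
 * when allocation pressure makes further progress impossible.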
*/ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); #else static void balloon_process(void *unused); static DECLARE_WORK(balloon_worker, balloon_process, NULL); #endif static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_cold_page(page); #else /* free_cold_page() is not being exported. 
*/ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } static unsigned long minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /*totalram_pages = bs.current_pages;*/ totalram_pages = bs.current_pages-totalram_bias; out: balloon_unlock(flags); return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. 
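 * The frames selected above are about to be handed back to the hypervisor,
 * so any transient kernel mappings (kmaps) and stale TLB entries referring
 * to them must be flushed first; otherwise the kernel could still touch
 * memory it no longer owns.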
*/ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /*totalram_pages = bs.current_pages;*/ totalram_pages = bs.current_pages - totalram_bias; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*Modified by linyuliang 00176349 begin*/ /*When adjustment be stopped,just write current memory to memory/target.*/ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static int flag; static void balloon_process(struct work_struct *unused) #else static void balloon_process(void *unused) #endif { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL,"control/uvp","Balloon_flag","1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if ((current_target() != bs.current_pages) || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL,"memory","target",buffer); } xenbus_write(XBT_NIL,"control/uvp","Balloon_flag","0"); //mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, minimum_target()); flag = bs.target_pages==target?0:1; schedule_work(&balloon_worker); } /*Modified by linyuliang 00176349 end*/ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
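 * For example, with 4 KiB pages (PAGE_SHIFT == 12) a memory/target value of
 * 1048576 KiB (1 GiB) becomes 1048576 >> 2 == 262144 pages.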
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN) */ #if !defined(CONFIG_XEN) #ifndef XENMEM_get_pod_target #define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; #endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /*bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space.
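 * On a CONFIG_XEN x86 kernel the pseudo-physical frames between
 * xen_start_info->nr_pages and max_pfn exist as struct pages but are not yet
 * backed by machine memory, so they are appended to the ballooned list here
 * and only populated later by increase_reservation().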
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); balloon_free_page(page); goto err; } /*totalram_pages = --bs.current_pages;*/ totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/common.h000066400000000000000000000043201314037446600237660ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. 
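 * balloon_low counts ballooned-out pages that came from lowmem and
 * balloon_high those that came from highmem; lowmem is re-populated first,
 * so highmem pages sit at the tail of the ballooned list.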
*/ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-balloon/sysfs.c000066400000000000000000000111531314037446600236420ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, const char *buf, size_t count) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strcpy(memstring, buf); target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { /* compatible with linux-2.6.24*/ .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/000077500000000000000000000000001314037446600232755ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/Kbuild000066400000000000000000000017051314037446600244350ustar00rootroot00000000000000########################################################### ### Author: King ### ########################################################### include $(M)/config.mk obj-m := xen-platform-pci.o xen-platform-pci-objs += evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += 
xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o modules: $(MAKE) -C $(KERNELDIR) M=$(PWD) modules ########################################################### ### END ### ########################################################### UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/Makefile000066400000000000000000000000661314037446600247370ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/evtchn.c000066400000000000000000000231571314037446600247400ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } #else int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } #endif EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; 
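/*
 * Descriptive note (added): only ports this driver allocated itself are
 * closed at the hypervisor here; bind_listening_port_to_irqhandler()
 * sets .close = 1, whereas ports bound with
 * bind_caller_port_to_irqhandler() belong to the caller and are merely
 * masked and forgotten.
 */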
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | 
SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/features.c000066400000000000000000000015371314037446600252650ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include /* * pv drivers header files */ #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */ EXPORT_SYMBOL(xen_features); void setup_xen_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; 
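/*
 * Descriptive note (added): frame and domid are filled in before flags;
 * the wmb() below ensures the remote domain can never observe
 * GTF_permit_access while frame/domid still hold stale values.
 */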
shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
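 * The rmb() below orders this read against the flag reads above, so the
 * frame value is only used once GTF_transfer_completed has been seen.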
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; 
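/*
 * Descriptive note (added): the new entries were chained together above
 * and pushed onto the front of the free list, so waiters queued through
 * gnttab_request_free_callback() can be run by the check below.
 */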
check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN static DEFINE_SEQLOCK(gnttab_dma_lock); #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
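 * That is, has __gnttab_dma_map_page() pinned it by raising _mapcount?
 * If so, bail out instead of replacing a frame that may still be
 * referenced by an in-flight DMA mapping.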
*/ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } int gnttab_suspend(void) { #ifdef CONFIG_X86 apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); #endif return 0; } #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
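 * For example, with start_idx 0 and end_idx 3 the XENMEM_add_to_physmap
 * calls are issued for idx 3, 2, 1, 0, so the very first call already
 * makes the hypervisor size the table for all four frames.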
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk(KERN_ERR "failed to ioremap gnttab shared frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/machine_reboot.c000066400000000000000000000043521314037446600264230ustar00rootroot00000000000000#include #include /* * pv drivers header files */ #include #include #include #include #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space immediately * after resume.
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume(); } else xenbus_suspend_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/panic-handler.c000066400000000000000000000017221314037446600261500ustar00rootroot00000000000000#include #include #include /* * pv drivers header files */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
*/ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/platform-compat.c000066400000000000000000000070241314037446600265510ustar00rootroot00000000000000#include #include #include #include #include /* * pv drivers header files */ #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117431314037446600273540ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. 
* Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/platform-pci.c000066400000000000000000000241011314037446600260340ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #ifdef __ia64__ #include #endif #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
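 * Clearing _PAGE_NX below still yields an executable mapping; each
 * page's PFN is then registered with Xen via wrmsrl() so the hypervisor
 * can fill it with the hypercall trampolines we call into.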
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret 
= xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/reboot.c000066400000000000000000000211461314037446600247370ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Was last suspend request cancelled? */ static int suspend_cancelled; /* Can we leave APs online when we suspend? 
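 * (Read from the xenstore key control/platform-feature-multiprocessor-suspend
 * in setup_shutdown_watcher().)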
*/ static int fast_suspend; /* * modified by King @ 2011-01-26 */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); #else static void __shutdown_handler(void *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler); #endif /*********************************************************/ static int setup_suspend_evtchn(void); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) schedule_delayed_work(&shutdown_work, 0); #else schedule_work(&shutdown_work); #endif break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) schedule_delayed_work(&shutdown_work, 0); #else schedule_work(&shutdown_work); #endif else BUG_ON(old_state != SHUTDOWN_RESUMING); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } #else static void __shutdown_handler(void *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? 
xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } #endif static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static irqreturn_t suspend_int(int irq, void* dev_id) #else static irqreturn_t suspend_int(int irq, void* dev_id, struct pt_regs *ptregs) #endif { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; 
} static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xen_proc.c000066400000000000000000000011111314037446600252500ustar00rootroot00000000000000#include #include /* * pv drivers header files */ #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry); void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xen_support.c000066400000000000000000000041241314037446600260300ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xenbus_client.c000066400000000000000000000175071314037446600263150ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DPRINTK(fmt, args...) \ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. 
Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xenbus_comms.c000066400000000000000000000151161314037446600261470ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) extern void xenbus_probe(struct work_struct *); #else extern void xenbus_probe(void *); #endif static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); //static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) static irqreturn_t wake_waiting(int irq, void *unused) { int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. 
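 *
 * The rmb() just below pairs with the write barrier the producer of
 * the response ring (xenstored) issues between filling the buffer and
 * advancing rsp_prod, so once the new producer index is observed the
 * payload bytes are guaranteed to be visible here.  Symmetrically, the
 * mb() after the memcpy() ensures the data really has been copied out
 * before rsp_cons is advanced and the slot handed back to the producer.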
*/ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /* Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xenbus_dev.c000066400000000000000000000253331314037446600256110ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
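 * Replies from xenstored and re-encoded watch events are appended here
 * by queue_reply() and drained one read_buffer at a time by
 * xenbus_dev_read(); readers sleep on read_waitq until data arrives,
 * and reply_mutex serialises access to the list.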
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); 
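/*
 * The watch_adapter wraps a kernel xenbus_watch around the
 * userspace-supplied path/token pair: when the watch fires,
 * watch_fired() rebuilds an XS_WATCH_EVENT message and queues it on
 * the connection's read_buffers, so the client sees the event in the
 * same wire format it would get from talking to xenstored directly.
 */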
watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef HAVE_UNLOCKED_IOCTL static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { extern int xenbus_conn(domid_t remote_dom, int *grant_ref, evtchn_port_t *local_port); void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, 
XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, #ifdef HAVE_UNLOCKED_IOCTL .unlocked_ioctl = xenbus_dev_ioctl #endif }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xenbus_probe.c000066400000000000000000000770371314037446600261520ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include /* * pv driver header files */ #include #include #include #include #include #include #include #ifdef MODULE #include #endif #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
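 *
 * The match is a plain string compare on the devicetype component of
 * the xenstore path, so a frontend advertises the types it handles much
 * like a PCI id table.  Purely as an illustration (not code from this
 * file), a network frontend would use something along the lines of:
 *
 *     static const struct xenbus_device_id netfront_ids[] = {
 *         { "vif" },
 *         { "" }
 *     };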
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #else static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; int length = 0, i = 0; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. 
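 *
 * Frontend devices live under the "device" root of the domain's
 * xenstore tree and are named device/<type>/<id>, hence .levels = 2;
 * frontend_bus_id() above turns such a path into the "<type>-<id>" bus
 * id used on the Linux driver-model side.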
*/ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, .dev = { .bus_id = "xen", }, }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define 
XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = drv->owner; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
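 *
 * The extra '/' check stops prefix matches between sibling nodes: for
 * example, removing "device/vif/1" must not tear down "device/vif/10",
 * even though the latter's name starts with the former's.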
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.parent = &bus->dev; xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. 
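 *
 * device_register() hooks the new xenbus_device into the driver core;
 * the nodename, devtype and modalias attributes created just after it
 * expose the "xen:<devicetype>" modalias that udev can use to autoload
 * the matching frontend module.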
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto unregister; return 0; unregister: device_remove_file(&xendev->dev, &dev_attr_nodename); device_remove_file(&xendev->dev, &dev_attr_devtype); device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
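 *
 * fe_watch covers the whole "device" subtree, so any write beneath it
 * fires frontend_changed(), which hands the path to dev_changed():
 * newly appeared nodes are probed via xenbus_probe_node() and vanished
 * ones are cleaned up with xenbus_cleanup_devices().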
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) void xenbus_probe(struct work_struct *unused) #else void xenbus_probe(void *unused) #endif { BUG_ON(!is_xenstored_ready()); /* Enumerate devices in xenstore and watch for changes. 
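 *
 * Existing entries are enumerated first and the watch on "device" is
 * registered right after; only then is the xenstore_chain notified, so
 * late registrants such as the shutdown watcher earlier in this
 * package get to run their setup once the store is usable.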
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; close.port = port; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); #endif rc = xb_free_port(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = xb_free_port(xen_store_evtchn); if (rc2 != 0) printk(KERN_WARNING "XENBUS: Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. 
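 *
 * The initial domain has no pre-existing xenstore ring, so the kernel
 * provides one itself: a zeroed page whose mfn is published via
 * xen_start_info->store_mfn, plus an unbound event channel that a
 * locally started xenstored can bind to later (both are exported
 * through /proc/xen/xsd_kva and /proc/xen/xsd_port below).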
*/ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); #ifdef CONFIG_XEN xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } xenbus_dev_init(); /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: if (page) free_page(page); /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifdef CONFIG_XEN postcore_initcall(xenbus_probe_init); MODULE_LICENSE("Dual BSD/GPL"); #else int xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? 
*/ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; /* * Add by Wangming HZ026 @ 2011-6-21 * For waiting frontend driver register success. 
*/ if(!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } /********************************end**************************************/ if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-platform-pci/xenbus_xs.c000066400000000000000000000527031314037446600254660ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. 
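 *
 * Only one reply can be pending because request_mutex serialises
 * requests: xs_talkv() writes a message and then blocks in
 * read_reply() until the answer shows up on this list.  Watch events
 * never land here; they are queued on the separate watch_events list
 * and processed by the xenwatch thread.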
*/ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; // must match the xsd_sockmsg_type values defined in xs_wire.h #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed.
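 * For example, join("device/vbd", "51712") returns "device/vbd/51712", while
 * join("device/vbd", "") returns just "device/vbd" with no trailing slash.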
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
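 * A typical caller retries on -EAGAIN, along the lines of talk_to_backend()
 * in blkfront.c below:
 *
 *     again:
 *         err = xenbus_transaction_start(&xbt);
 *         if (err) goto fail;
 *         err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
 *         ...
 *         err = xenbus_transaction_end(xbt, 0);
 *         if (err == -EAGAIN)
 *             goto again;
 *
 * Passing a non-zero abort flag to xenbus_transaction_end() discards the
 * changes instead of committing them.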
*/ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. 
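 * (A watch structure at, say, 0xffff880012345678 is registered with the token
 * "FFFF880012345678"; find_watch() recovers the pointer again with
 * simple_strtoul(token, NULL, 16) when an event carrying that token arrives.)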
*/ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; BUG_ON(watch->flags & XBWF_new_thread); sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (current->pid != xenwatch_pid) { mutex_lock(&xenwatch_mutex); mutex_unlock(&xenwatch_mutex); } } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. 
*/ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/000077500000000000000000000000001314037446600214535ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/Kbuild000066400000000000000000000001151314037446600226050ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/Makefile000066400000000000000000000000661314037446600231150ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/blkfront.c000066400000000000000000000631501314037446600234450ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual 
block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include /* * pv driver header files */ #include #include #include #include #include #include #include #include "block.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); //static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(void *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; /* FIXME: Use dynamic device id if this is not set. 
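 * ("virtual-device" holds the guest device number chosen by the toolstack,
 * e.g. 768 for hda (IDE major 3, minor 0) or 51712 for xvda (major 202);
 * devices beyond the classic range are published as "virtual-device-ext"
 * and decoded via BLKIF_MINOR_EXT() in vbd.c.)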
*/ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19) INIT_WORK(&info->work, blkif_restart_queue); #else INIT_WORK(&info->work, blkif_restart_queue, (void *)info); #endif for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); memset(info->sg, 0, sizeof(info->sg)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); #else err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); #endif if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. 
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); xlvbd_del(info); out: xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately.
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. */ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. 
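 * (Each ring slot carries the shadow id, the starting sector, the operation,
 * and up to BLKIF_MAX_SEGMENTS_PER_REQUEST {gref, first_sect, last_sect}
 * entries granting the backend access to the pages mapped by
 * blk_rq_map_sg() below.)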
*/ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)req->sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; #endif ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } #else for (i = 0; i < ring_req->nr_segments; ++i) { sg = info->sg + i; buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } #endif info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } //static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
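 * (Pairs with the write barrier the backend issues before advancing
 * rsp_prod: without this read barrier we could observe the new rsp_prod
 * value but stale response entries.)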
*/ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); /*ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate);*/ /* compatible with linux-2.6.25-14 */ __blk_end_request(req, 0, req->hard_nr_sectors << 9); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ?
GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/block.h000066400000000000000000000114351314037446600227220ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vbd/vbd.c000066400000000000000000000261411314037446600223760ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include /* * pv drivers header files */ #include "block.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major 202, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_START] != NULL) do_register = 0; break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = 202; 
minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/000077500000000000000000000000001314037446600216425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/Kbuild000066400000000000000000000001171314037446600227760ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/Makefile000066400000000000000000000000661314037446600233040ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/accel.c000066400000000000000000000542521314037446600230650ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include /* * pv drivers header files */ #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. 
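 * (Held across plugin load/remove and while the accel-frontend watch work
 * item swaps a vif's accelerator state, so a suspend cannot race with a
 * plugin change.)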
*/ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_disable(&vif_state->np->napi); #else netif_poll_disable(vif_state->np->netdev); #endif netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_enable(&vif_state->np->napi); #else netif_poll_enable(vif_state->np->netdev); #endif } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_disable(&vif_state->np->napi); #else netif_poll_disable(vif_state->np->netdev); #endif netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_enable(&vif_state->np->napi); #else netif_poll_enable(vif_state->np->netdev); #endif } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/netfront.c000066400000000000000000001740571314037446600236630ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #include struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_HW)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring/freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...)
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int); //static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include #include static void remove_nic8139() { struct pci_dev *pci_dev_nic8139 = NULL; //do { pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* check kernel version */ } //} while (pci_dev_nic8139); # comment off because fedora12 will crash return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; // remove 8139 nic when xen nic devices appear remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); #else err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name, netdev); #endif if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @param arpType ARPOP_REQUEST or ARPOP_REPLY * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; skb = arp_create(arpType, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_enable(&np->napi); #endif spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif } } spin_unlock_bh(&np->rx_lock); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) netif_start_queue(dev); #else network_maybe_wake_tx(dev); #endif return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the balloon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests.
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return 0; } frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } //static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif dev->last_rx = jiffies; } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = 
np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static int netif_poll(struct napi_struct *napi, int budget) #else static int netif_poll(struct net_device *dev, int *pbudget) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; #else struct netfront_info *np = netdev_priv(dev); #endif struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) int budget; #endif int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) if ((budget = *pbudget) > dev->quota) budget = dev->quota; #endif rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximate the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustment, our * achievable receive throughput using the standard receive * buffer size was cut by 25%(!!!).
*/ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) __skb_queue_purge(&errq); #endif while ((skb = __skb_dequeue(&errq))) kfree_skb(skb); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) dev->last_rx = jiffies; #endif } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) *pbudget -= work_done; dev->quota -= work_done; #endif if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) __napi_complete(napi); #else __netif_rx_complete(dev); #endif local_irq_restore(flags); } spin_unlock(&np->rx_lock); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) return work_done; #else return more_to_do | accel_more_to_do; #endif } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
*/ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } while ((skb = __skb_dequeue(&free_list)) != NULL) dev_kfree_skb(skb); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_disable(&np->napi); netif_stop_queue(np->netdev); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. 
*/ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_min(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_min_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) #else static ssize_t store_rxbuf_min(struct class_device *cd, const char *buf, size_t len) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct net_device *netdev = to_net_dev(dev); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); #endif struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_max(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_max_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) #else static ssize_t store_rxbuf_max(struct class_device *cd, const char *buf, size_t len) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct net_device *netdev = to_net_dev(dev); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); #endif struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > 
RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static struct device_attribute xennet_attrs[] = { #else static const struct class_device_attribute xennet_attrs[] = { #endif __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) error = device_create_file(&netdev->dev, &xennet_attrs[i]); #else error = class_device_create_file(&netdev->class_dev, &xennet_attrs[i]); #endif if (error) goto fail; } return 0; fail: while (--i >= 0) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) device_remove_file(&netdev->dev, &xennet_attrs[i]); #else class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); #endif } return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) device_remove_file(&netdev->dev, &xennet_attrs[i]); #else class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); #endif } } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; #endif static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
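 *
 * Illustration: tx_skbs[0] holds the encoded index 1, tx_skbs[1] holds 2,
 * and so on. These small integers are below PAGE_OFFSET, so they can never
 * be confused with real skb pointers; that is how add_id_to_freelist() and
 * netif_release_tx_bufs() tell free slots from in-use ones.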
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) && \ LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->set_mac_address = xennet_set_mac_address; netdev->change_mtu = xennet_change_mtu; netif_napi_add(netdev, &np->napi, netif_poll, 64); #endif #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) netdev->poll = netif_poll; netdev->weight = 64; #endif netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) SET_MODULE_OWNER(netdev); #endif SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int i = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { //send ARP_REQUEST packets first in case many switchboard refuse to //transmit ARPOP_REPLY packets without ARPOP_REQUEST received before. //then send ARPOP_REPLY packets for (i=0; i<3; i++) { send_fake_arp(dev, ARPOP_REQUEST); } for (i=0; i<3; i++) { send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/FEDORA9/xen-vnif/netfront.h000066400000000000000000000211371314037446600236560ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include /* * pv drivers header files */ #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct napi_struct napi; #endif unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/000077500000000000000000000000001314037446600176755ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/Makefile000066400000000000000000000022661314037446600213430ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) COMPILEARGS = $(CROSSCOMPILE) ASIANUX_BALLOON = $(shell echo $(KERNDIR) | grep "2.6.9-89" | grep "AXS") balloon_path=other ifneq ($(ASIANUX_BALLOON),) balloon_path = 2.6.9-89 endif obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all:lnfile make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install:lnfile make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) lnfile: @set -e; \ cd xen-balloon; \ for file in `ls $(balloon_path)`; \ do \ ln -sf $(balloon_path)/$$file $$file; \ done; \ cd - clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) rm -rf Modules.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/000077500000000000000000000000001314037446600226015ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/asm-generic/000077500000000000000000000000001314037446600247735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/asm-generic/pgtable-nopmd.h000066400000000000000000000005611314037446600276770ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/asm-generic/pgtable-nopud.h000066400000000000000000000006211314037446600277040ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ 
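/*
 * Minimal sketch (hypothetical caller: mm, va and do_something_with() are
 * assumed names, not part of this tree). With the stubs above, page-table
 * walks written for the 4-level API still compile on these pre-2.6.11
 * kernels; pud_offset() simply hands the pgd entry straight back, so the
 * extra level folds away:
 *
 *	pgd_t *pgd = pgd_offset(mm, va);
 *	pud_t *pud = pud_offset(pgd, va);
 *	pmd_t *pmd = pmd_offset(pud, va);
 *	if (pmd_present(*pmd))
 *		do_something_with(pmd);
 */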
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/linux/000077500000000000000000000000001314037446600237405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/linux/io.h000066400000000000000000000002771314037446600245260ustar00rootroot00000000000000#ifndef _LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/linux/mutex.h000066400000000000000000000015411314037446600252540ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This file is released under the GPLv2. */ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/xen/000077500000000000000000000000001314037446600233735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/compat-include/xen/platform-compat.h000066400000000000000000000070671314037446600266630ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined (_LINUX_NOTIFIER_H) && !defined ATOMIC_NOTIFIER_HEAD #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/config.mk000066400000000000000000000014641314037446600215000ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. 
we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif OSVERSION = $(shell echo $(KERNDIR) | grep 2.6.9-22) ifneq ($(OSVERSION),) _XEN_CPPFLAGS += -DRHEL42 endif _XEN_CPPFLAGS += -include $(srctree)/include/linux/autoconf.h EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/000077500000000000000000000000001314037446600213205ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/000077500000000000000000000000001314037446600221005ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/hypercall.h000066400000000000000000000034551314037446600242430ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifdef CONFIG_X86_64 #include "hypercall_64.h" #else #include "hypercall_32.h" #endif #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/hypercall_32.h000066400000000000000000000234541314037446600245500ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_32_H__ #define __HYPERCALL_32_H__ #include /* memcpy() */ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #define __STR(x) #x #define STR(x) __STR(x) #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("STR(__HYPERVISOR_##name)" * 32),%%eax; " \ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ long __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ (type)__res; \ }) #define _hypercall1(type, name, a1) \ ({ \ long __res, __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ long __res, __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ long __res, __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ long __res, __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ (type)__res; \ }) static inline int 
HYPERVISOR_set_trap_table( trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int HYPERVISOR_mmu_update( mmu_update_t *req, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int HYPERVISOR_mmuext_op( struct mmuext_op *op, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int HYPERVISOR_set_gdt( unsigned long *frame_list, int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int HYPERVISOR_set_debugreg( int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long HYPERVISOR_get_debugreg( int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int HYPERVISOR_multicall( multicall_entry_t *call_list, int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int HYPERVISOR_xen_version( int cmd, void *arg) { return 
_hypercall2(int, xen_version, cmd, arg); } static inline int HYPERVISOR_console_io( int cmd, int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int HYPERVISOR_vcpu_op( int cmd, int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } static inline unsigned long HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } static inline int HYPERVISOR_callback_op( int cmd, void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/hypercall_64.h000066400000000000000000000232761314037446600245570ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_64_H__ #define __HYPERCALL_64_H__ #include /* memcpy() */ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #define __STR(x) #x #define STR(x) __STR(x) #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%rax; " \ "add $("STR(__HYPERVISOR_##name)" * 32),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ long __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ (type)__res; \ }) #define _hypercall1(type, name, a1) \ ({ \ long __res, __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ long __res, __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ long __res, __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ long __res, __ign1, __ign2, __ign3; \ asm volatile ( \ "movq %7,%%r10; " \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "g" ((long)(a4)) \ : "memory", "r10" ); \ (type)__res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ long __res, __ign1, __ign2, __ign3; \ asm volatile ( \ "movq %7,%%r10; movq %8,%%r8; " \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "g" ((long)(a4)), \ "g" ((long)(a5)) \ : "memory", "r10", "r8" ); \ (type)__res; \ }) static inline int HYPERVISOR_set_trap_table( trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int HYPERVISOR_mmu_update( mmu_update_t *req, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int HYPERVISOR_mmuext_op( struct mmuext_op *op, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int HYPERVISOR_set_gdt( unsigned long *frame_list, int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, 
event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int HYPERVISOR_set_debugreg( int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long HYPERVISOR_get_debugreg( int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int HYPERVISOR_multicall( multicall_entry_t *call_list, int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int HYPERVISOR_console_io( int cmd, int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int HYPERVISOR_vcpu_op( int cmd, int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static 
inline int HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } static inline int HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } static inline unsigned long HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } static inline int HYPERVISOR_callback_op( int cmd, void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/hypervisor.h000066400000000000000000000161521314037446600244700ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) # ifdef CONFIG_X86_PAE # include # else # include # endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) # include #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(unsigned long ptr, unsigned long bytes); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); /* Turn jiffies into Xen system time. 
*/ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #define is_running_on_xen() 1 static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline int HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason); #endif return rc; } static inline int HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } static inline void MULTI_update_va_mapping_otherdomain( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; mcl->args[MULTI_UVMDOMID_INDEX] = domid; } #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/maddr.h000066400000000000000000000132411314037446600233410ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(max_mapnr && pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(max_mapnr && pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < max_mapnr) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(max_mapnr && pfn >= max_mapnr); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). 
On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #endif #ifdef CONFIG_X86_PAE static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } #else #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #endif #define __pte_ma(x) ((pte_t) { (x) } ) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/asm/synch_bitops.h000066400000000000000000000072471314037446600247670ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } static __always_inline int synch_const_test_bit(int nr, const volatile void * addr) { return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; } static __inline__ int synch_var_test_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "btl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) ); return oldbit; } #define synch_test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ synch_const_test_bit((nr),(addr)) : \ synch_var_test_bit((nr),(addr))) #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/000077500000000000000000000000001314037446600221125ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/balloon.h000066400000000000000000000046071314037446600237200ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_BALLOON_H__ #define __ASM_BALLOON_H__ /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif /* __ASM_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/blkif.h000066400000000000000000000066421314037446600233620ustar00rootroot00000000000000#ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? 
*/ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; for (i = 0; i < src->nr_segments; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; for (i = 0; i < src->nr_segments; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/cpu_hotplug.h000066400000000000000000000016611314037446600246200ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map) #else #define cpu_set_initialized(cpu) ((void)0) #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/driver_util.h000066400000000000000000000006401314037446600246130ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) /* Allocate/destroy a 'vmalloc' VM area. 
*/ extern struct vm_struct *alloc_vm_area(unsigned long size); extern void free_vm_area(struct vm_struct *area); #endif extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/evtchn.h000066400000000000000000000102621314037446600235530ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. */ int bind_caller_port_to_irqhandler( unsigned int caller_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). 
*/ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); void mask_evtchn(int port); void unmask_evtchn(int port); static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/features.h000066400000000000000000000007041314037446600241020ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __ASM_XEN_FEATURES_H__ #define __ASM_XEN_FEATURES_H__ #include extern void setup_xen_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; #define xen_feature(flag) (xen_features[flag]) #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/gnttab.h000066400000000000000000000112331314037446600235420ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_suspend(void); int gnttab_resume(void); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/hvm.h000066400000000000000000000007101314037446600230530ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/hypercall.h000066400000000000000000000007561314037446600242560ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int 
HYPERVISOR_multicall_check( multicall_entry_t *call_list, int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0))) return nr_calls; return 0; } #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/hypervisor_sysfs.h000066400000000000000000000015611314037446600257270ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) extern struct subsystem hypervisor_subsys; struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/000077500000000000000000000000001314037446600240525ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/COPYING000066400000000000000000000032641314037446600251120ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
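Illustrative sketch (not part of the UVP-Tools sources): the gnttab.h header above only declares the grant-table interface, so the snippet below shows roughly how a paravirtual front-end driver would use those helpers to share one guest page with a backend domain and later revoke access. The function names example_share_page/example_unshare_page, the variable backend_domid, and the include paths are assumptions for this example; only gnttab_grant_foreign_access(), gnttab_end_foreign_access() and virt_to_mfn() come from the headers in this tree.

/*
 * Hedged usage sketch for the grant-table helpers declared above.
 * Assumes this RHEL4 include tree is on the build path.
 */
#include <linux/mm.h>      /* __get_free_page(), free_page() */
#include <linux/errno.h>
#include <xen/gnttab.h>    /* gnttab_grant_foreign_access(), gnttab_end_foreign_access() */
#include <asm/maddr.h>     /* virt_to_mfn() */

static grant_ref_t example_ref;
static unsigned long example_page;

/* Grant 'backend_domid' read/write access to one freshly allocated page. */
static int example_share_page(domid_t backend_domid)
{
    int ref;

    example_page = __get_free_page(GFP_KERNEL);
    if (!example_page)
        return -ENOMEM;

    /* Negative return values are treated as errors here (conventional). */
    ref = gnttab_grant_foreign_access(backend_domid,
                                      virt_to_mfn(example_page), 0 /* writable */);
    if (ref < 0) {
        free_page(example_page);
        return ref;
    }

    example_ref = ref;
    /* A real driver would now advertise 'example_ref' to the backend, e.g. via xenbus. */
    return 0;
}

/* Revoke the grant; per gnttab.h, the page is freed once the entry is unused. */
static void example_unshare_page(void)
{
    gnttab_end_foreign_access(example_ref, 0, example_page);
}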
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/acm.h000066400000000000000000000156051314037446600247720ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "xen.h" /* if ACM_DEBUG defined, all hooks should * print a short trace message (comment it out * when not in testing mode ) */ /* #define ACM_DEBUG */ #ifdef ACM_DEBUG # define printkd(fmt, args...) printk(fmt,## args) #else # define printkd(fmt, args...) #endif /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? 
"CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 3 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. */ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. 
the java tool) */ struct acm_policy_buffer { uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t magic; uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/acm_ops.h000066400000000000000000000110311314037446600256400ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). 
*/ #define ACM_INTERFACE_VERSION 0xAAAA0009 /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ uint32_t interface_version; XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ uint32_t interface_version; XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ uint32_t interface_version; XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t interface_version; uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t interface_version; uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ uint32_t interface_version; XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ uint32_t interface_version; XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-ia64.h000066400000000000000000000423701314037446600257070ustar00rootroot00000000000000/****************************************************************************** * arch-ia64/hypervisor-if.h * * Guest OS interface to IA64 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #ifndef __HYPERVISOR_IF_IA64_H__ #define __HYPERVISOR_IF_IA64_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #define uint64_aligned_t uint64_t #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); __DEFINE_XEN_GUEST_HANDLE(u64, unsigned long); DEFINE_XEN_GUEST_HANDLE(char); DEFINE_XEN_GUEST_HANDLE(int); DEFINE_XEN_GUEST_HANDLE(long); DEFINE_XEN_GUEST_HANDLE(void); typedef unsigned long xen_pfn_t; DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #define PRI_xen_pfn "lx" #endif /* Arch specific VIRQs definition */ #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ /* Maximum number of virtual CPUs in multi-processor guests. 
*/ /* WARNING: before changing this, check that shared_info fits on a page */ #define MAX_VIRT_CPUS 64 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; #define INVALID_MFN (~0UL) #define MEM_G (1UL << 30) #define MEM_M (1UL << 20) #define MMIO_START (3 * MEM_G) #define MMIO_SIZE (512 * MEM_M) #define VGA_IO_START 0xA0000UL #define VGA_IO_SIZE 0x20000 #define LEGACY_IO_START (MMIO_START + MMIO_SIZE) #define LEGACY_IO_SIZE (64*MEM_M) #define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE) #define IO_PAGE_SIZE PAGE_SIZE #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE) #define STORE_PAGE_SIZE PAGE_SIZE #define BUFFER_IO_PAGE_START (STORE_PAGE_START+STORE_PAGE_SIZE) #define BUFFER_IO_PAGE_SIZE PAGE_SIZE #define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START+BUFFER_IO_PAGE_SIZE) #define BUFFER_PIO_PAGE_SIZE PAGE_SIZE #define IO_SAPIC_START 0xfec00000UL #define IO_SAPIC_SIZE 0x100000 #define PIB_START 0xfee00000UL #define PIB_SIZE 0x200000 #define GFW_START (4*MEM_G -16*MEM_M) #define GFW_SIZE (16*MEM_M) struct pt_fpreg { union { unsigned long bits[2]; long double __dummy; /* force 16-byte alignment */ } u; }; struct cpu_user_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct pt_fpreg f6; /* scratch */ struct pt_fpreg f7; /* scratch */ struct pt_fpreg f8; /* scratch */ struct pt_fpreg f9; /* scratch */ struct pt_fpreg f10; /* scratch */ struct pt_fpreg f11; /* scratch */ unsigned long r4; /* preserved */ unsigned long r5; /* preserved */ unsigned long r6; /* preserved */ unsigned long r7; /* preserved */ unsigned long eml_unat; /* used for emulating instruction */ unsigned long pad0; /* alignment pad */ }; typedef struct cpu_user_regs cpu_user_regs_t; union vac { unsigned long value; struct { int a_int:1; int a_from_int_cr:1; int a_to_int_cr:1; int a_from_psr:1; int a_from_cpuid:1; int a_cover:1; int a_bsw:1; long reserved:57; }; }; typedef union vac vac_t; union vdc { unsigned long value; struct { int d_vmsw:1; int d_extint:1; int d_ibr_dbr:1; int d_pmc:1; int d_to_pmd:1; int d_itm:1; long reserved:58; }; }; typedef union vdc vdc_t; struct mapped_regs { union vac vac; union vdc vdc; unsigned long virt_env_vaddr; unsigned long reserved1[29]; unsigned long vhpi; unsigned long reserved2[95]; union { unsigned long vgr[16]; unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active }; union { unsigned long vbgr[16]; unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active }; unsigned long vnat; unsigned long vbnat; unsigned long vcpuid[5]; unsigned long reserved3[11]; unsigned long vpsr; unsigned long vpr; unsigned long reserved4[76]; union { unsigned long vcr[128]; struct { unsigned long dcr; // CR0 unsigned long itm; unsigned long iva; unsigned long rsv1[5]; unsigned long pta; // CR8 unsigned long rsv2[7]; unsigned long ipsr; // CR16 unsigned long isr; unsigned long rsv3; unsigned long iip; unsigned long ifa; unsigned long itir; unsigned long iipa; unsigned long ifs; unsigned long iim; // CR24 unsigned long iha; unsigned long rsv4[38]; unsigned long lid; // CR64 unsigned long ivr; unsigned long tpr; unsigned long eoi; unsigned long irr[4]; unsigned long itv; // CR72 unsigned long pmv; unsigned long cmcv; unsigned long rsv5[5]; unsigned long lrr0; // CR80 unsigned long lrr1; unsigned long rsv6[46]; }; }; union { unsigned long reserved5[128]; struct { unsigned long precover_ifs; unsigned long unat; // not sure if this is needed until NaT arch is done int interrupt_collection_enabled; // virtual psr.ic /* virtual interrupt deliverable flag is evtchn_upcall_mask in * shared info area now. 
interrupt_mask_addr is the address * of evtchn_upcall_mask for current vcpu */ unsigned char *interrupt_mask_addr; int pending_interruption; unsigned char vpsr_pp; unsigned char vpsr_dfh; unsigned char hpsr_dfh; unsigned char hpsr_mfh; unsigned long reserved5_1[4]; int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual int banknum; // 0 or 1, which virtual register bank is active unsigned long rrs[8]; // region registers unsigned long krs[8]; // kernel registers unsigned long pkrs[8]; // protection key registers unsigned long tmp[8]; // temp registers (e.g. for hyperprivops) }; }; }; typedef struct mapped_regs mapped_regs_t; struct vpd { struct mapped_regs vpd_low; unsigned long reserved6[3456]; unsigned long vmm_avail[128]; unsigned long reserved7[4096]; }; typedef struct vpd vpd_t; struct arch_vcpu_info { }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct arch_shared_info { /* PFN of the start_info page. */ unsigned long start_info_pfn; /* Interrupt vector for event channel. */ int evtchn_vector; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; typedef unsigned long xen_callback_t; struct ia64_tr_entry { unsigned long pte; unsigned long itir; unsigned long vadr; unsigned long rid; }; struct vcpu_extra_regs { struct ia64_tr_entry itrs[8]; struct ia64_tr_entry dtrs[8]; unsigned long iva; unsigned long dcr; unsigned long event_callback_ip; }; struct vcpu_guest_context { #define VGCF_EXTRA_REGS (1<<1) /* Get/Set extra regs. */ unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; struct vcpu_extra_regs extra_regs; unsigned long privregs_pfn; }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); /* dom0 vp op */ #define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0 /* Map io space in machine address to dom0 physical address space. Currently the assigned physical address equals the machine address. */ #define IA64_DOM0VP_ioremap 0 /* Convert a pseudo physical page frame number to the corresponding machine page frame number. If no page is assigned, INVALID_MFN or GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */ #define IA64_DOM0VP_phystomach 1 /* Convert a machine page frame number to the corresponding pseudo physical page frame number of the caller domain. */ #define IA64_DOM0VP_machtophys 3 /* Reserved for future use. */ #define IA64_DOM0VP_iounmap 4 /* Unmap and free pages contained in the specified pseudo physical region. */ #define IA64_DOM0VP_zap_physmap 5 /* Assign machine page frame to dom0's pseudo physical address space. */ #define IA64_DOM0VP_add_physmap 6 /* expose the p2m table into domain */ #define IA64_DOM0VP_expose_p2m 7 /* xen perfmon */ #define IA64_DOM0VP_perfmon 8 /* gmfn version of IA64_DOM0VP_add_physmap */ #define IA64_DOM0VP_add_physmap_with_gmfn 9 // flags for page assignment to pseudo physical address space #define _ASSIGN_readonly 0 #define ASSIGN_readonly (1UL << _ASSIGN_readonly) #define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag /* Internal only: memory attribute must be WC/UC/UCE. */ #define _ASSIGN_nocache 1 #define ASSIGN_nocache (1UL << _ASSIGN_nocache) // tlb tracking #define _ASSIGN_tlb_track 2 #define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track) /* Internal only: associated with PGC_allocated bit */ #define _ASSIGN_pgc_allocated 3 #define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated) /* This structure has the same layout as struct ia64_boot_param, defined in the ia64 Linux headers. It is redefined here to ease use.
*/ struct xen_ia64_boot_param { unsigned long command_line; /* physical address of cmd line args */ unsigned long efi_systab; /* physical address of EFI system table */ unsigned long efi_memmap; /* physical address of EFI memory map */ unsigned long efi_memmap_size; /* size of EFI memory map */ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ unsigned int efi_memdesc_version; /* memory descriptor version */ struct { unsigned short num_cols; /* number of columns on console. */ unsigned short num_rows; /* number of rows on console. */ unsigned short orig_x; /* cursor's x position */ unsigned short orig_y; /* cursor's y position */ } console_info; unsigned long fpswa; /* physical address of the fpswa interface */ unsigned long initrd_start; unsigned long initrd_size; unsigned long domain_start; /* va where the boot time domain begins */ unsigned long domain_size; /* how big is the boot domain */ }; #endif /* !__ASSEMBLY__ */ /* Size of the shared_info area (this is not related to page size). */ #define XSI_SHIFT 14 #define XSI_SIZE (1 << XSI_SHIFT) /* Log size of mapped_regs area (64 KB - only 4KB is used). */ #define XMAPPEDREGS_SHIFT 12 #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ #define XMAPPEDREGS_OFS XSI_SIZE /* Hyperprivops. */ #define HYPERPRIVOP_START 0x1 #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) #define HYPERPRIVOP_MAX (0x19) /* Fast and light hypercalls. */ #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 /* Xencomm macros. 
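* An illustrative sketch (the variable names 'paddr' and 'desc' are
* hypothetical, not part of this interface): a small buffer whose physical
* address has its top bits clear can be passed "inline" by setting bit 63,
* and decoded again with the macros defined below:
*
*   unsigned long desc = paddr | XENCOMM_INLINE_FLAG;
*   if (XENCOMM_IS_INLINE(desc))
*       paddr = XENCOMM_INLINE_ADDR(desc);
*
* Larger buffers are instead described through a separate xencomm descriptor.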
*/ #define XENCOMM_INLINE_MASK 0xf800000000000000UL #define XENCOMM_INLINE_FLAG 0x8000000000000000UL #define XENCOMM_IS_INLINE(addr) \ (((unsigned long)(addr) & XENCOMM_INLINE_MASK) == XENCOMM_INLINE_FLAG) #define XENCOMM_INLINE_ADDR(addr) \ ((unsigned long)(addr) & ~XENCOMM_INLINE_MASK) /* xen perfmon */ #ifdef XEN #ifndef __ASSEMBLY__ #ifndef _ASM_IA64_PERFMON_H #include // asm/perfmon.h requires struct list_head #include // for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t #endif /* _ASM_IA64_PERFMON_H */ DEFINE_XEN_GUEST_HANDLE(pfarg_features_t); DEFINE_XEN_GUEST_HANDLE(pfarg_context_t); DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t); DEFINE_XEN_GUEST_HANDLE(pfarg_load_t); #endif /* __ASSEMBLY__ */ #endif /* XEN */ #endif /* __HYPERVISOR_IF_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-powerpc.h000066400000000000000000000101201314037446600266070ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2005, 2006 * * Authors: Hollis Blanchard */ #ifndef __XEN_PUBLIC_ARCH_PPC_64_H__ #define __XEN_PUBLIC_ARCH_PPC_64_H__ #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { \ int __pad[(sizeof (long long) - sizeof (void *)) / sizeof (int)]; \ type *p; \ } __attribute__((__aligned__(8))) __guest_handle_ ## name #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define set_xen_guest_handle(hnd, val) \ do { \ if (sizeof ((hnd).__pad)) \ (hnd).__pad[0] = 0; \ (hnd).p = val; \ } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(char); DEFINE_XEN_GUEST_HANDLE(int); DEFINE_XEN_GUEST_HANDLE(long); DEFINE_XEN_GUEST_HANDLE(void); typedef unsigned long long xen_pfn_t; DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #define PRI_xen_pfn "llx" #endif /* * Pointers and other address fields inside interface structures are padded to * 64 bits. This means that field alignments aren't different between 32- and * 64-bit architectures. */ /* NB. Multi-level macro ensures __LINE__ is expanded before concatenation. 
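* Illustrative note (the PASTE/PASTE2 macro names below are hypothetical):
* pasting directly with '##' would stop __LINE__ from expanding, e.g.
*
*   #define PASTE(a, b) a ## b
*   PASTE(pad_, __LINE__)    -> the literal token pad___LINE__
*
* whereas routing the arguments through one extra expanding level, as
* MEMORY_PADDING does below, expands __LINE__ first:
*
*   #define PASTE2(a, b) PASTE(a, b)
*   PASTE2(pad_, __LINE__)   -> e.g. pad_123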
*/ #define __MEMORY_PADDING(_X) #define _MEMORY_PADDING(_X) __MEMORY_PADDING(_X) #define MEMORY_PADDING _MEMORY_PADDING(__LINE__) /* And the trap vector is... */ #define TRAP_INSTR "li 0,-1; sc" /* XXX just "sc"? */ #ifndef __ASSEMBLY__ #define XENCOMM_INLINE_FLAG (1UL << 63) typedef uint64_t xen_ulong_t; /* User-accessible registers: nost of these need to be saved/restored * for every nested Xen invocation. */ struct cpu_user_regs { uint64_t gprs[32]; uint64_t lr; uint64_t ctr; uint64_t srr0; uint64_t srr1; uint64_t pc; uint64_t msr; uint64_t fpscr; /* XXX Is this necessary */ uint64_t xer; uint64_t hid4; /* debug only */ uint64_t dar; /* debug only */ uint32_t dsisr; /* debug only */ uint32_t cr; uint32_t __pad; /* good spot for another 32bit reg */ uint32_t entry_vector; }; typedef struct cpu_user_regs cpu_user_regs_t; typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* XXX timebase */ /* ONLY used to communicate with dom0! See also struct exec_domain. */ struct vcpu_guest_context { cpu_user_regs_t user_regs; /* User-level CPU registers */ uint64_t sdr1; /* Pagetable base */ /* XXX etc */ }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { uint64_t boot_timebase; }; struct arch_vcpu_info { }; /* Support for multi-processor guests. */ #define MAX_VIRT_CPUS 32 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86/000077500000000000000000000000001314037446600254125ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86/xen-x86_32.h000066400000000000000000000132651314037446600273130ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS /* * Virtual addresses beyond this are not modifiable by guest OSes. The * machine->physical mapping table starts at this address, read-only. */ #ifdef CONFIG_X86_PAE #define __HYPERVISOR_VIRT_START 0xF5800000 #define __MACH2PHYS_VIRT_START 0xF5800000 #define __MACH2PHYS_VIRT_END 0xF6800000 #else #define __HYPERVISOR_VIRT_START 0xFC000000 #define __MACH2PHYS_VIRT_START 0xFC000000 #define __MACH2PHYS_VIRT_END 0xFC400000 #endif #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef __DEFINE_XEN_GUEST_HANDLE #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. 
* When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86/xen-x86_64.h000066400000000000000000000156711314037446600273230ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
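* Illustrative note (not part of the original header): x86_64 PV guest kernels
* run in ring 3 alongside user space, so the FLAT_KERNEL_* selectors defined
* below are simply aliases of the ring-3 selectors, e.g.
*
*   FLAT_KERNEL_CS == FLAT_RING3_CS64   (0xe033)
*   FLAT_KERNEL_SS == FLAT_RING3_SS64   (0xe02b)
*
* A guest that keeps these flat selectors loaded can avoid installing a GDT
* of its own.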
*/ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif #ifndef __ASSEMBLY__ /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. * All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #ifdef __GNUC__ /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). 
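* Illustrative expansion (not part of the original header): under __GNUC__,
*
*   __DECL_REG(ax)
*
* declares the anonymous union
*
*   union { uint64_t rax, eax; uint32_t _eax; };
*
* so callers may refer to either regs->rax or regs->eax, while the non-gcc
* fallback below declares only 'uint64_t rax;'.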
*/ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86/xen.h000066400000000000000000000171401314037446600263600ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. 
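* Illustrative sketch (the variable names 'frames' and 'pfn_array' are
* hypothetical): with the structural form defined below,
*
*   DEFINE_XEN_GUEST_HANDLE(xen_pfn_t)
*
* expands to
*
*   typedef struct { xen_pfn_t *p; } __guest_handle_xen_pfn_t;
*
* and a caller typically fills a handle before issuing a hypercall with
*
*   XEN_GUEST_HANDLE(xen_pfn_t) frames;
*   set_xen_guest_handle(frames, pfn_array);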
*/ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(char); DEFINE_XEN_GUEST_HANDLE(int); DEFINE_XEN_GUEST_HANDLE(long); DEFINE_XEN_GUEST_HANDLE(void); typedef unsigned long xen_pfn_t; DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86_32.h000066400000000000000000000024131314037446600260670ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/arch-x86_64.h000066400000000000000000000024131314037446600260740ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/callback.h000066400000000000000000000056561314037446600257730ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ #define CALLBACKTYPE_event 0 #define CALLBACKTYPE_failsafe 1 #define CALLBACKTYPE_syscall 2 /* x86_64 only */ /* * sysenter is only available on x86_32 with the * supervisor_mode_kernel option enabled. */ #define CALLBACKTYPE_sysenter 3 #define CALLBACKTYPE_nmi 4 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #endif /* __XEN_PUBLIC_CALLBACK_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/dom0_ops.h000066400000000000000000000077061314037446600257550ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/domctl.h000066400000000000000000000414321314037446600255110ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000005 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8_t) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ #define XEN_DOMCTL_createdomain 1 struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) uint32_t flags; }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_getdomaininfo 5 struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* CPU to which this domain is bound. */ #define XEN_DOMINF_cpumask 255 #define XEN_DOMINF_cpushift 8 /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); #define XEN_DOMCTL_getmemlist 6 struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64_t) buffer; /* OUT variables. 
*/ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); #define XEN_DOMCTL_getpageframeinfo2 8 struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(uint32_t) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ #define XEN_DOMCTL_shadow_op 10 /* Disable shadow mode. */ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8_t) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. 
*/ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); #define XEN_DOMCTL_max_mem 11 struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); #define XEN_DOMCTL_getvcpuinfo 14 struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? */ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. */ #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_getvcpuaffinity 25 struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); #define XEN_DOMCTL_max_vcpus 15 struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); #define XEN_DOMCTL_scheduler_op 16 /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); #define XEN_DOMCTL_setdomainhandle 17 struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); #define XEN_DOMCTL_setdebugging 18 struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); #define XEN_DOMCTL_irq_permission 19 struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); #define XEN_DOMCTL_iomem_permission 20 struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? 
*/ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); #define XEN_DOMCTL_ioport_permission 21 struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? */ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); #define XEN_DOMCTL_hypercall_init 22 struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); #define XEN_DOMCTL_arch_setup 23 #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); #define XEN_DOMCTL_settimeoffset 24 struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8_t) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size * req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); #define XEN_DOMCTL_real_mode_area 26 struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); struct xen_domctl { uint32_t cmd; uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct 
xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/elfnote.h000066400000000000000000000155261314037446600256700ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). 
* * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. 
* xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/elfstructs.h000066400000000000000000000463261314037446600264340ustar00rootroot00000000000000#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__ #define __XEN_PUBLIC_ELFSTRUCTS_H__ 1 /* * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ typedef uint8_t Elf_Byte; typedef uint32_t Elf32_Addr; /* Unsigned program address */ typedef uint32_t Elf32_Off; /* Unsigned file offset */ typedef int32_t Elf32_Sword; /* Signed large integer */ typedef uint32_t Elf32_Word; /* Unsigned large integer */ typedef uint16_t Elf32_Half; /* Unsigned medium integer */ typedef uint64_t Elf64_Addr; typedef uint64_t Elf64_Off; typedef int32_t Elf64_Shalf; typedef int32_t Elf64_Sword; typedef uint32_t Elf64_Word; typedef int64_t Elf64_Sxword; typedef uint64_t Elf64_Xword; typedef uint32_t Elf64_Half; typedef uint16_t Elf64_Quarter; /* * e_ident[] identification indexes * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html */ #define EI_MAG0 0 /* file ID */ #define EI_MAG1 1 /* file ID */ #define EI_MAG2 2 /* file ID */ #define EI_MAG3 3 /* file ID */ #define EI_CLASS 4 /* file class */ #define EI_DATA 5 /* data encoding */ #define EI_VERSION 6 /* ELF header version */ #define EI_OSABI 7 /* OS/ABI ID */ #define EI_ABIVERSION 8 /* ABI version */ #define EI_PAD 9 /* start of pad bytes */ #define EI_NIDENT 16 /* Size of e_ident[] */ /* e_ident[] magic number */ #define ELFMAG0 0x7f /* e_ident[EI_MAG0] */ #define ELFMAG1 'E' /* e_ident[EI_MAG1] */ #define ELFMAG2 'L' /* e_ident[EI_MAG2] */ #define ELFMAG3 'F' /* e_ident[EI_MAG3] */ #define ELFMAG "\177ELF" /* magic */ #define SELFMAG 4 /* size of magic */ /* e_ident[] file class */ #define ELFCLASSNONE 0 /* invalid */ #define ELFCLASS32 1 /* 32-bit objs */ #define ELFCLASS64 2 /* 64-bit objs */ #define ELFCLASSNUM 3 /* number of classes */ /* e_ident[] data encoding */ #define ELFDATANONE 0 /* invalid */ #define ELFDATA2LSB 1 /* Little-Endian */ #define ELFDATA2MSB 2 /* Big-Endian */ #define ELFDATANUM 3 /* number of data encode defines */ /* e_ident[] Operating System/ABI */ #define ELFOSABI_SYSV 0 /* UNIX System V ABI */ #define ELFOSABI_HPUX 1 /* HP-UX operating system */ #define ELFOSABI_NETBSD 2 /* NetBSD */ #define ELFOSABI_LINUX 3 /* GNU/Linux */ #define ELFOSABI_HURD 4 /* GNU/Hurd */ #define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */ #define ELFOSABI_SOLARIS 6 /* Solaris */ #define ELFOSABI_MONTEREY 7 /* Monterey */ #define ELFOSABI_IRIX 8 /* IRIX */ #define ELFOSABI_FREEBSD 9 /* FreeBSD */ #define ELFOSABI_TRU64 10 /* TRU64 UNIX */ #define ELFOSABI_MODESTO 11 /* Novell Modesto */ #define ELFOSABI_OPENBSD 12 /* OpenBSD */ #define ELFOSABI_ARM 97 /* ARM */ #define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ /* e_ident */ #define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \ (ehdr).e_ident[EI_MAG3] == ELFMAG3) /* ELF Header */ typedef struct elfhdr { unsigned char e_ident[EI_NIDENT]; /* ELF Identification */ Elf32_Half e_type; /* object file type */ Elf32_Half e_machine; /* machine */ Elf32_Word e_version; /* object file version */ Elf32_Addr e_entry; /* virtual entry point */ Elf32_Off e_phoff; /* program header table offset */ Elf32_Off e_shoff; /* section header table offset */ Elf32_Word e_flags; /* processor-specific flags */ Elf32_Half e_ehsize; /* ELF header size */ Elf32_Half e_phentsize; /* program header entry size */ Elf32_Half e_phnum; /* number of program header entries */ Elf32_Half e_shentsize; /* section header entry size */ Elf32_Half e_shnum; /* number of section header entries */ Elf32_Half e_shstrndx; /* section header table's "section header string table" entry offset */ } Elf32_Ehdr; typedef struct { unsigned char e_ident[EI_NIDENT]; /* Id bytes 
*/ Elf64_Quarter e_type; /* file type */ Elf64_Quarter e_machine; /* machine type */ Elf64_Half e_version; /* version number */ Elf64_Addr e_entry; /* entry point */ Elf64_Off e_phoff; /* Program hdr offset */ Elf64_Off e_shoff; /* Section hdr offset */ Elf64_Half e_flags; /* Processor flags */ Elf64_Quarter e_ehsize; /* sizeof ehdr */ Elf64_Quarter e_phentsize; /* Program header entry size */ Elf64_Quarter e_phnum; /* Number of program headers */ Elf64_Quarter e_shentsize; /* Section header entry size */ Elf64_Quarter e_shnum; /* Number of section headers */ Elf64_Quarter e_shstrndx; /* String table index */ } Elf64_Ehdr; /* e_type */ #define ET_NONE 0 /* No file type */ #define ET_REL 1 /* relocatable file */ #define ET_EXEC 2 /* executable file */ #define ET_DYN 3 /* shared object file */ #define ET_CORE 4 /* core file */ #define ET_NUM 5 /* number of types */ #define ET_LOPROC 0xff00 /* reserved range for processor */ #define ET_HIPROC 0xffff /* specific e_type */ /* e_machine */ #define EM_NONE 0 /* No Machine */ #define EM_M32 1 /* AT&T WE 32100 */ #define EM_SPARC 2 /* SPARC */ #define EM_386 3 /* Intel 80386 */ #define EM_68K 4 /* Motorola 68000 */ #define EM_88K 5 /* Motorola 88000 */ #define EM_486 6 /* Intel 80486 - unused? */ #define EM_860 7 /* Intel 80860 */ #define EM_MIPS 8 /* MIPS R3000 Big-Endian only */ /* * Don't know if EM_MIPS_RS4_BE, * EM_SPARC64, EM_PARISC, * or EM_PPC are ABI compliant */ #define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */ #define EM_SPARC64 11 /* SPARC v9 64-bit unoffical */ #define EM_PARISC 15 /* HPPA */ #define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */ #define EM_PPC 20 /* PowerPC */ #define EM_PPC64 21 /* PowerPC 64-bit */ #define EM_ARM 40 /* Advanced RISC Machines ARM */ #define EM_ALPHA 41 /* DEC ALPHA */ #define EM_SPARCV9 43 /* SPARC version 9 */ #define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */ #define EM_IA_64 50 /* Intel Merced */ #define EM_X86_64 62 /* AMD x86-64 architecture */ #define EM_VAX 75 /* DEC VAX */ /* Version */ #define EV_NONE 0 /* Invalid */ #define EV_CURRENT 1 /* Current */ #define EV_NUM 2 /* number of versions */ /* Section Header */ typedef struct { Elf32_Word sh_name; /* name - index into section header string table section */ Elf32_Word sh_type; /* type */ Elf32_Word sh_flags; /* flags */ Elf32_Addr sh_addr; /* address */ Elf32_Off sh_offset; /* file offset */ Elf32_Word sh_size; /* section size */ Elf32_Word sh_link; /* section header table index link */ Elf32_Word sh_info; /* extra information */ Elf32_Word sh_addralign; /* address alignment */ Elf32_Word sh_entsize; /* section entry size */ } Elf32_Shdr; typedef struct { Elf64_Half sh_name; /* section name */ Elf64_Half sh_type; /* section type */ Elf64_Xword sh_flags; /* section flags */ Elf64_Addr sh_addr; /* virtual address */ Elf64_Off sh_offset; /* file offset */ Elf64_Xword sh_size; /* section size */ Elf64_Half sh_link; /* link to another */ Elf64_Half sh_info; /* misc info */ Elf64_Xword sh_addralign; /* memory alignment */ Elf64_Xword sh_entsize; /* table entry size */ } Elf64_Shdr; /* Special Section Indexes */ #define SHN_UNDEF 0 /* undefined */ #define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */ #define SHN_LOPROC 0xff00 /* reserved range for processor */ #define SHN_HIPROC 0xff1f /* specific section indexes */ #define SHN_ABS 0xfff1 /* absolute value */ #define SHN_COMMON 0xfff2 /* common symbol */ #define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */ /* sh_type */ #define SHT_NULL 0 /* inactive */ #define 
SHT_PROGBITS 1 /* program defined information */ #define SHT_SYMTAB 2 /* symbol table section */ #define SHT_STRTAB 3 /* string table section */ #define SHT_RELA 4 /* relocation section with addends*/ #define SHT_HASH 5 /* symbol hash table section */ #define SHT_DYNAMIC 6 /* dynamic section */ #define SHT_NOTE 7 /* note section */ #define SHT_NOBITS 8 /* no space section */ #define SHT_REL 9 /* relation section without addends */ #define SHT_SHLIB 10 /* reserved - purpose unknown */ #define SHT_DYNSYM 11 /* dynamic symbol table section */ #define SHT_NUM 12 /* number of section types */ #define SHT_LOPROC 0x70000000 /* reserved range for processor */ #define SHT_HIPROC 0x7fffffff /* specific section header types */ #define SHT_LOUSER 0x80000000 /* reserved range for application */ #define SHT_HIUSER 0xffffffff /* specific indexes */ /* Section names */ #define ELF_BSS ".bss" /* uninitialized data */ #define ELF_DATA ".data" /* initialized data */ #define ELF_DEBUG ".debug" /* debug */ #define ELF_DYNAMIC ".dynamic" /* dynamic linking information */ #define ELF_DYNSTR ".dynstr" /* dynamic string table */ #define ELF_DYNSYM ".dynsym" /* dynamic symbol table */ #define ELF_FINI ".fini" /* termination code */ #define ELF_GOT ".got" /* global offset table */ #define ELF_HASH ".hash" /* symbol hash table */ #define ELF_INIT ".init" /* initialization code */ #define ELF_REL_DATA ".rel.data" /* relocation data */ #define ELF_REL_FINI ".rel.fini" /* relocation termination code */ #define ELF_REL_INIT ".rel.init" /* relocation initialization code */ #define ELF_REL_DYN ".rel.dyn" /* relocaltion dynamic link info */ #define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */ #define ELF_REL_TEXT ".rel.text" /* relocation code */ #define ELF_RODATA ".rodata" /* read-only data */ #define ELF_SHSTRTAB ".shstrtab" /* section header string table */ #define ELF_STRTAB ".strtab" /* string table */ #define ELF_SYMTAB ".symtab" /* symbol table */ #define ELF_TEXT ".text" /* code */ /* Section Attribute Flags - sh_flags */ #define SHF_WRITE 0x1 /* Writable */ #define SHF_ALLOC 0x2 /* occupies memory */ #define SHF_EXECINSTR 0x4 /* executable */ #define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */ /* specific section attributes */ /* Symbol Table Entry */ typedef struct elf32_sym { Elf32_Word st_name; /* name - index into string table */ Elf32_Addr st_value; /* symbol value */ Elf32_Word st_size; /* symbol size */ unsigned char st_info; /* type and binding */ unsigned char st_other; /* 0 - no defined meaning */ Elf32_Half st_shndx; /* section header index */ } Elf32_Sym; typedef struct { Elf64_Half st_name; /* Symbol name index in str table */ Elf_Byte st_info; /* type / binding attrs */ Elf_Byte st_other; /* unused */ Elf64_Quarter st_shndx; /* section index of symbol */ Elf64_Xword st_value; /* value of symbol */ Elf64_Xword st_size; /* size of symbol */ } Elf64_Sym; /* Symbol table index */ #define STN_UNDEF 0 /* undefined */ /* Extract symbol info - st_info */ #define ELF32_ST_BIND(x) ((x) >> 4) #define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf)) #define ELF64_ST_BIND(x) ((x) >> 4) #define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf)) /* Symbol Binding - ELF32_ST_BIND - st_info */ #define STB_LOCAL 0 /* Local symbol */ #define STB_GLOBAL 1 /* Global symbol */ #define STB_WEAK 2 /* like global - lower precedence */ #define STB_NUM 3 /* number of symbol bindings */ #define 
STB_LOPROC 13 /* reserved range for processor */ #define STB_HIPROC 15 /* specific symbol bindings */ /* Symbol type - ELF32_ST_TYPE - st_info */ #define STT_NOTYPE 0 /* not specified */ #define STT_OBJECT 1 /* data object */ #define STT_FUNC 2 /* function */ #define STT_SECTION 3 /* section */ #define STT_FILE 4 /* file */ #define STT_NUM 5 /* number of symbol types */ #define STT_LOPROC 13 /* reserved range for processor */ #define STT_HIPROC 15 /* specific symbol types */ /* Relocation entry with implicit addend */ typedef struct { Elf32_Addr r_offset; /* offset of relocation */ Elf32_Word r_info; /* symbol table index and type */ } Elf32_Rel; /* Relocation entry with explicit addend */ typedef struct { Elf32_Addr r_offset; /* offset of relocation */ Elf32_Word r_info; /* symbol table index and type */ Elf32_Sword r_addend; } Elf32_Rela; /* Extract relocation info - r_info */ #define ELF32_R_SYM(i) ((i) >> 8) #define ELF32_R_TYPE(i) ((unsigned char) (i)) #define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t)) typedef struct { Elf64_Xword r_offset; /* where to do it */ Elf64_Xword r_info; /* index & type of relocation */ } Elf64_Rel; typedef struct { Elf64_Xword r_offset; /* where to do it */ Elf64_Xword r_info; /* index & type of relocation */ Elf64_Sxword r_addend; /* adjustment value */ } Elf64_Rela; #define ELF64_R_SYM(info) ((info) >> 32) #define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF) #define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t)) /* Program Header */ typedef struct { Elf32_Word p_type; /* segment type */ Elf32_Off p_offset; /* segment offset */ Elf32_Addr p_vaddr; /* virtual address of segment */ Elf32_Addr p_paddr; /* physical address - ignored? */ Elf32_Word p_filesz; /* number of bytes in file for seg. */ Elf32_Word p_memsz; /* number of bytes in mem. for seg. 
*/ Elf32_Word p_flags; /* flags */ Elf32_Word p_align; /* memory alignment */ } Elf32_Phdr; typedef struct { Elf64_Half p_type; /* entry type */ Elf64_Half p_flags; /* flags */ Elf64_Off p_offset; /* offset */ Elf64_Addr p_vaddr; /* virtual address */ Elf64_Addr p_paddr; /* physical address */ Elf64_Xword p_filesz; /* file size */ Elf64_Xword p_memsz; /* memory size */ Elf64_Xword p_align; /* memory & file alignment */ } Elf64_Phdr; /* Segment types - p_type */ #define PT_NULL 0 /* unused */ #define PT_LOAD 1 /* loadable segment */ #define PT_DYNAMIC 2 /* dynamic linking section */ #define PT_INTERP 3 /* the RTLD */ #define PT_NOTE 4 /* auxiliary information */ #define PT_SHLIB 5 /* reserved - purpose undefined */ #define PT_PHDR 6 /* program header */ #define PT_NUM 7 /* Number of segment types */ #define PT_LOPROC 0x70000000 /* reserved range for processor */ #define PT_HIPROC 0x7fffffff /* specific segment types */ /* Segment flags - p_flags */ #define PF_X 0x1 /* Executable */ #define PF_W 0x2 /* Writable */ #define PF_R 0x4 /* Readable */ #define PF_MASKPROC 0xf0000000 /* reserved bits for processor */ /* specific segment flags */ /* Dynamic structure */ typedef struct { Elf32_Sword d_tag; /* controls meaning of d_val */ union { Elf32_Word d_val; /* Multiple meanings - see d_tag */ Elf32_Addr d_ptr; /* program virtual address */ } d_un; } Elf32_Dyn; typedef struct { Elf64_Xword d_tag; /* controls meaning of d_val */ union { Elf64_Addr d_ptr; Elf64_Xword d_val; } d_un; } Elf64_Dyn; /* Dynamic Array Tags - d_tag */ #define DT_NULL 0 /* marks end of _DYNAMIC array */ #define DT_NEEDED 1 /* string table offset of needed lib */ #define DT_PLTRELSZ 2 /* size of relocation entries in PLT */ #define DT_PLTGOT 3 /* address PLT/GOT */ #define DT_HASH 4 /* address of symbol hash table */ #define DT_STRTAB 5 /* address of string table */ #define DT_SYMTAB 6 /* address of symbol table */ #define DT_RELA 7 /* address of relocation table */ #define DT_RELASZ 8 /* size of relocation table */ #define DT_RELAENT 9 /* size of relocation entry */ #define DT_STRSZ 10 /* size of string table */ #define DT_SYMENT 11 /* size of symbol table entry */ #define DT_INIT 12 /* address of initialization func. */ #define DT_FINI 13 /* address of termination function */ #define DT_SONAME 14 /* string table offset of shared obj */ #define DT_RPATH 15 /* string table offset of library search path */ #define DT_SYMBOLIC 16 /* start sym search in shared obj. */ #define DT_REL 17 /* address of rel. tbl. w addends */ #define DT_RELSZ 18 /* size of DT_REL relocation table */ #define DT_RELENT 19 /* size of DT_REL relocation entry */ #define DT_PLTREL 20 /* PLT referenced relocation entry */ #define DT_DEBUG 21 /* bugger */ #define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */ #define DT_JMPREL 23 /* add. of PLT's relocation entries */ #define DT_BIND_NOW 24 /* Bind now regardless of env setting */ #define DT_NUM 25 /* Number used. 
*/ #define DT_LOPROC 0x70000000 /* reserved range for processor */ #define DT_HIPROC 0x7fffffff /* specific dynamic array tags */ /* Standard ELF hashing function */ unsigned int elf_hash(const unsigned char *name); /* * Note Definitions */ typedef struct { Elf32_Word namesz; Elf32_Word descsz; Elf32_Word type; } Elf32_Note; typedef struct { Elf64_Half namesz; Elf64_Half descsz; Elf64_Half type; } Elf64_Note; #if defined(ELFSIZE) #define CONCAT(x,y) __CONCAT(x,y) #define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x))) #define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y)))) #define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE)) #define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x))) #endif #if defined(ELFSIZE) && (ELFSIZE == 32) #define Elf_Ehdr Elf32_Ehdr #define Elf_Phdr Elf32_Phdr #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define Elf_Rel Elf32_Rel #define Elf_RelA Elf32_Rela #define Elf_Dyn Elf32_Dyn #define Elf_Word Elf32_Word #define Elf_Sword Elf32_Sword #define Elf_Addr Elf32_Addr #define Elf_Off Elf32_Off #define Elf_Nhdr Elf32_Nhdr #define Elf_Note Elf32_Note #define ELF_R_SYM ELF32_R_SYM #define ELF_R_TYPE ELF32_R_TYPE #define ELF_R_INFO ELF32_R_INFO #define ELFCLASS ELFCLASS32 #define ELF_ST_BIND ELF32_ST_BIND #define ELF_ST_TYPE ELF32_ST_TYPE #define ELF_ST_INFO ELF32_ST_INFO #define AuxInfo Aux32Info #elif defined(ELFSIZE) && (ELFSIZE == 64) #define Elf_Ehdr Elf64_Ehdr #define Elf_Phdr Elf64_Phdr #define Elf_Shdr Elf64_Shdr #define Elf_Sym Elf64_Sym #define Elf_Rel Elf64_Rel #define Elf_RelA Elf64_Rela #define Elf_Dyn Elf64_Dyn #define Elf_Word Elf64_Word #define Elf_Sword Elf64_Sword #define Elf_Addr Elf64_Addr #define Elf_Off Elf64_Off #define Elf_Nhdr Elf64_Nhdr #define Elf_Note Elf64_Note #define ELF_R_SYM ELF64_R_SYM #define ELF_R_TYPE ELF64_R_TYPE #define ELF_R_INFO ELF64_R_INFO #define ELFCLASS ELFCLASS64 #define ELF_ST_BIND ELF64_ST_BIND #define ELF_ST_TYPE ELF64_ST_TYPE #define ELF_ST_INFO ELF64_ST_INFO #define AuxInfo Aux64Info #endif #endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/event_channel.h000066400000000000000000000213251314037446600270370ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. 
*/ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as * accepting interdomain bindings from domain <remote_dom>. A fresh port * is allocated in <dom> and returned as <port>. * NOTES: * 1. If the caller is unprivileged then <dom> must be DOMID_SELF. * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * <local_port>. * NOTES: * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <virq> on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>. * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel <port>. If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is <port>. */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters.
*/ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at <dom, port>. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which <dom> is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superseded by new * event_channel_op() hypercall since 0x00030202.
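 *
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * header): with the newer interface each sub-operation struct above is passed
 * directly to the hypercall whose prototype is documented at the top of this
 * file, e.g. allocating an unbound port that a backend domain can later bind
 * to. The HYPERVISOR_event_channel_op() wrapper name and the backend_domid
 * variable are assumptions for illustration only.
 *
 *     struct evtchn_alloc_unbound alloc = {
 *         .dom        = DOMID_SELF,
 *         .remote_dom = backend_domid,
 *     };
 *     if (HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc) == 0)
 *         local_port = alloc.port;  // advertise this port to the backend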
*/ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/features.h000066400000000000000000000047551314037446600260540ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. 
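 *
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * a guest typically reads all of the XENFEAT_* flags defined in this file
 * once at start of day via the XENVER_get_features version-op (struct
 * xen_feature_info in version.h) and caches the result. The
 * HYPERVISOR_xen_version() wrapper name follows the Linux PV drivers and is
 * an assumption here.
 *
 *     struct xen_feature_info fi = { .submap_idx = 0 };
 *     if (HYPERVISOR_xen_version(XENVER_get_features, &fi) == 0)
 *         writable_pt = !!(fi.submap & (1U << XENFEAT_writable_page_tables));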
*/ #define XENFEAT_pae_pgdir_above_4gb 4 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/000077500000000000000000000000001314037446600255035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/Makefile000066400000000000000000000020001314037446600271330ustar00rootroot00000000000000XEN_ROOT=../../../.. include $(XEN_ROOT)/Config.mk architectures := x86_32 x86_64 ia64 headers := $(patsubst %, %.h, $(architectures)) scripts := $(wildcard *.py) .PHONY: all clean check-headers all: $(headers) check-headers clean: rm -f $(headers) rm -f checker checker.c $(XEN_TARGET_ARCH).size rm -f *.pyc *.o *~ ifeq ($(CROSS_COMPILE)$(XEN_TARGET_ARCH),$(XEN_COMPILE_ARCH)) check-headers: checker ./checker > $(XEN_TARGET_ARCH).size diff -u reference.size $(XEN_TARGET_ARCH).size checker: checker.c $(headers) $(HOSTCC) $(HOSTCFLAGS) -o $@ $< else check-headers: @echo "cross build: skipping check" endif x86_32.h: ../arch-x86/xen-x86_32.h ../arch-x86/xen.h ../xen.h $(scripts) python mkheader.py $* $@ $(filter %.h,$^) x86_64.h: ../arch-x86/xen-x86_64.h ../arch-x86/xen.h ../xen.h $(scripts) python mkheader.py $* $@ $(filter %.h,$^) ia64.h: ../arch-ia64.h ../xen.h $(scripts) python mkheader.py $* $@ $(filter %.h,$^) checker.c: $(scripts) python mkchecker.py $(XEN_TARGET_ARCH) $@ $(architectures) UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/mkchecker.py000066400000000000000000000023611314037446600300130ustar00rootroot00000000000000#!/usr/bin/python import sys; from structs import structs; # command line arguments arch = sys.argv[1]; outfile = sys.argv[2]; archs = sys.argv[3:]; f = open(outfile, "w"); f.write(''' /* * sanity checks for generated foreign headers: * - verify struct sizes * * generated by %s -- DO NOT EDIT */ #include #include #include #include #include "../xen.h" '''); for a in archs: f.write('#include "%s.h"\n' % a); f.write('int main(int argc, char *argv[])\n{\n'); f.write('\tprintf("\\n");'); f.write('printf("%-20s |", "structs");\n'); for a in archs: f.write('\tprintf("%%8s", "%s");\n' % a); f.write('\tprintf("\\n");'); f.write('\tprintf("\\n");'); for struct in structs: f.write('\tprintf("%%-20s |", "%s");\n' % struct); for a in archs: if a == arch: s = struct; # native else: s = struct + "_" + a; f.write('#ifdef %s_has_no_%s\n' % (a, struct)); f.write('\tprintf("%8s", "-");\n'); f.write("#else\n"); f.write('\tprintf("%%8zd", sizeof(struct %s));\n' % s); f.write("#endif\n"); f.write('\tprintf("\\n");\n\n'); f.write('\tprintf("\\n");\n'); f.write('\texit(0);\n'); f.write('}\n'); f.close(); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/mkheader.py000066400000000000000000000074471314037446600276510ustar00rootroot00000000000000#!/usr/bin/python import sys, re; from structs import structs, defines; # command line arguments arch = sys.argv[1]; outfile = sys.argv[2]; infiles = sys.argv[3:]; ########################################################################### # configuration #2: architecture information inttypes = {}; header = {}; footer = {}; # x86_32 inttypes["x86_32"] = { "unsigned long" : "uint32_t", "long" : "uint32_t", "xen_pfn_t" : "uint32_t", }; header["x86_32"] = """ #define __i386___X86_32 1 #pragma pack(4) """; footer["x86_32"] = """ #pragma pack() """; # x86_64 
inttypes["x86_64"] = { "unsigned long" : "__align8__ uint64_t", "long" : "__align8__ uint64_t", "xen_pfn_t" : "__align8__ uint64_t", }; header["x86_64"] = """ #ifdef __GNUC__ # define __DECL_REG(name) union { uint64_t r ## name, e ## name; } # define __align8__ __attribute__((aligned (8))) #else # define __DECL_REG(name) uint64_t r ## name # define __align8__ FIXME #endif #define __x86_64___X86_64 1 """; # ia64 inttypes["ia64"] = { "unsigned long" : "__align8__ uint64_t", "long" : "__align8__ uint64_t", "xen_pfn_t" : "__align8__ uint64_t", "long double" : "__align16__ ldouble_t", }; header["ia64"] = """ #define __align8__ __attribute__((aligned (8))) #define __align16__ __attribute__((aligned (16))) typedef unsigned char ldouble_t[16]; """; ########################################################################### # main input = ""; output = ""; fileid = re.sub("[-.]", "_", "__FOREIGN_%s__" % outfile.upper()); # read input header files for name in infiles: f = open(name, "r"); input += f.read(); f.close(); # add header output += """ /* * public xen defines and struct for %s * generated by %s -- DO NOT EDIT */ #ifndef %s #define %s 1 """ % (arch, sys.argv[0], fileid, fileid) if arch in header: output += header[arch]; output += "\n"; # add defines to output for line in re.findall("#define[^\n]+", input): for define in defines: regex = "#define\s+%s\\b" % define; match = re.search(regex, line); if None == match: continue; if define.upper()[0] == define[0]: replace = define + "_" + arch.upper(); else: replace = define + "_" + arch; regex = "\\b%s\\b" % define; output += re.sub(regex, replace, line) + "\n"; output += "\n"; # delete defines, comments, empty lines input = re.sub("#define[^\n]+\n", "", input); input = re.compile("/\*(.*?)\*/", re.S).sub("", input) input = re.compile("\n\s*\n", re.S).sub("\n", input); # add structs to output for struct in structs: regex = "struct\s+%s\s*\{(.*?)\n\};" % struct; match = re.search(regex, input, re.S) if None == match: output += "#define %s_has_no_%s 1\n" % (arch, struct); else: output += "struct %s_%s {%s\n};\n" % (struct, arch, match.group(1)); output += "typedef struct %s_%s %s_%s_t;\n" % (struct, arch, struct, arch); output += "\n"; # add footer if arch in footer: output += footer[arch]; output += "\n"; output += "#endif /* %s */\n" % fileid; # replace: defines for define in defines: if define.upper()[0] == define[0]: replace = define + "_" + arch.upper(); else: replace = define + "_" + arch; output = re.sub("\\b%s\\b" % define, replace, output); # replace: structs + struct typedefs for struct in structs: output = re.sub("\\b(struct\s+%s)\\b" % struct, "\\1_%s" % arch, output); output = re.sub("\\b(%s)_t\\b" % struct, "\\1_%s_t" % arch, output); # replace: integer types integers = inttypes[arch].keys(); integers.sort(lambda a, b: cmp(len(b),len(a))); for type in integers: output = re.sub("\\b%s\\b" % type, inttypes[arch][type], output); # print results f = open(outfile, "w"); f.write(output); f.close; UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/reference.size000066400000000000000000000012251314037446600303350ustar00rootroot00000000000000 structs | x86_32 x86_64 ia64 start_info | 1104 1152 1152 trap_info | 8 16 - pt_fpreg | - - 16 cpu_user_regs | 68 200 496 xen_ia64_boot_param | - - 96 ia64_tr_entry | - - 32 vcpu_extra_regs | - - 536 vcpu_guest_context | 2800 5168 1056 arch_vcpu_info | 24 16 0 vcpu_time_info | 32 32 32 vcpu_info | 64 64 48 arch_shared_info | 268 280 272 shared_info | 2584 3368 4384 
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/foreign/structs.py000066400000000000000000000025651314037446600275740ustar00rootroot00000000000000# configuration: what needs translation structs = [ "start_info", "trap_info", "pt_fpreg", "cpu_user_regs", "xen_ia64_boot_param", "ia64_tr_entry", "vcpu_extra_regs", "vcpu_guest_context", "arch_vcpu_info", "vcpu_time_info", "vcpu_info", "arch_shared_info", "shared_info" ]; defines = [ "__i386__", "__x86_64__", "FLAT_RING1_CS", "FLAT_RING1_DS", "FLAT_RING1_SS", "FLAT_RING3_CS64", "FLAT_RING3_DS64", "FLAT_RING3_SS64", "FLAT_KERNEL_CS64", "FLAT_KERNEL_DS64", "FLAT_KERNEL_SS64", "FLAT_KERNEL_CS", "FLAT_KERNEL_DS", "FLAT_KERNEL_SS", # x86_{32,64} "_VGCF_i387_valid", "VGCF_i387_valid", "_VGCF_in_kernel", "VGCF_in_kernel", "_VGCF_failsafe_disables_events", "VGCF_failsafe_disables_events", "_VGCF_syscall_disables_events", "VGCF_syscall_disables_events", "_VGCF_online", "VGCF_online", # ia64 "VGCF_EXTRA_REGS", # all archs "xen_pfn_to_cr3", "MAX_VIRT_CPUS", "MAX_GUEST_CMDLINE" ]; UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/grant_table.h000066400000000000000000000356211314037446600265140ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. 
No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry grant_entry_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. 
*/ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access * by devices and/or host CPUs. If successful, <handle> is a tracking number * that must be presented later to destroy the mapping(s). On error, <handle> * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in <host_addr>. * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by <handle> * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by <handle>. * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least * <nr_frames> pages. The frame addresses are written to the <frame_list>. * Only <nr_frames> addresses are written, even if the table is larger. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain.
The * foreign domain has previously registered its interest in the transfer via * <domid, ref>. * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be either MFNs or, for foreign domains, * grant references. The foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * granted appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. <dom> may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * Bitfield values for update_pin_status.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine address of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognised domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle.
*/ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/000077500000000000000000000000001314037446600246445ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/e820.h000066400000000000000000000034031314037446600254730ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* PC BIOS standard E820 types. */ #define E820_RAM 1 #define E820_RESERVED 2 #define E820_ACPI 3 #define E820_NVS 4 /* E820 location in HVM virtual address space. */ #define E820_MAP_PAGE 0x00090000 #define E820_MAP_NR_OFFSET 0x000001E8 #define E820_MAP_OFFSET 0x000002D0 struct e820entry { uint64_t addr; uint64_t size; uint32_t type; } __attribute__((packed)); #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/hvm_info_table.h000066400000000000000000000033221314037446600277710ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. 
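 *
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * guest-side code that consumes this table typically locates it at
 * HVM_INFO_PADDR (defined below) and accepts it only when the "HVM INFO"
 * signature matches and the bytes covered by 'length' sum to zero. This is
 * an assumed convention shown for illustration, not a quotation of any
 * particular loader.
 *
 *     const struct hvm_info_table *t = (const void *)(long)HVM_INFO_PADDR;
 *     uint8_t sum = 0;
 *     for (uint32_t i = 0; i < t->length; i++)
 *         sum += ((const uint8_t *)t)[i];
 *     int valid = (memcmp(t->signature, "HVM INFO", 8) == 0) && (sum == 0);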
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; uint8_t acpi_enabled; uint8_t apic_mode; uint32_t nr_vcpus; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/hvm_op.h000066400000000000000000000055541314037446600263160ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). 
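 *
 * Illustrative sketch (not part of this interface): a sufficiently
 * privileged device model might assert INTA# of device 3 on bus 0 roughly
 * as follows. The hypercall wrapper name follows the usual Linux/libxc
 * convention and the guest_domid variable is a placeholder:
 *
 *     struct xen_hvm_set_pci_intx_level intx = {
 *         .domid  = guest_domid,
 *         .domain = 0, .bus = 0, .device = 3, .intx = 0,
 *         .level  = 1,
 *     };
 *     rc = HYPERVISOR_hvm_op(HVMOP_set_pci_intx_level, &intx);
 *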
*/ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/ioreq.h000066400000000000000000000101071314037446600261330ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_AND 2 #define IOREQ_TYPE_OR 3 #define IOREQ_TYPE_XOR 4 #define IOREQ_TYPE_XCHG 5 #define IOREQ_TYPE_ADD 6 #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ #define IOREQ_TYPE_SUB 9 /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. 
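 *
 * Illustrative outline (not part of this interface) of how a device model
 * typically uses this port together with vp_ioreq; the notification
 * primitives are whatever the device model's environment provides:
 *
 *     wait for an event on vp_eport;
 *     if (iodata->vp_ioreq.state == STATE_IOREQ_READY) {
 *         iodata->vp_ioreq.state = STATE_IOREQ_INPROCESS;
 *         service the access described by vp_ioreq (addr, size, dir, ...);
 *         iodata->vp_ioreq.state = STATE_IORESP_READY;
 *         notify vp_eport so the vcpu can resume;
 *     }
 *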
*/ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; #define IOREQ_BUFFER_SLOT_NUM 80 struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #if defined(__i386__) || defined(__x86_64__) #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #endif /* defined(__i386__) || defined(__x86_64__) */ #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/params.h000066400000000000000000000037421314037446600263060ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. 
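 *
 * Illustrative sketch (assumes a Linux-style HYPERVISOR_hvm_op wrapper): an
 * HVM guest typically fetches its xenstore page and event channel at start
 * of day like this:
 *
 *     struct xen_hvm_param p = { .domid = DOMID_SELF,
 *                                .index = HVM_PARAM_STORE_PFN };
 *     HYPERVISOR_hvm_op(HVMOP_get_param, &p);
 *     store_pfn = p.value;
 *     p.index = HVM_PARAM_STORE_EVTCHN;
 *     HYPERVISOR_hvm_op(HVMOP_get_param, &p);
 *     store_evtchn = p.value;
 *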
*/ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #define HVM_NR_PARAMS 7 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/save.h000066400000000000000000000267631314037446600257710ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * Save/restore header: general info about the save file. 
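 *
 * Illustrative sketch (not part of the format itself): a consumer of a save
 * image usually validates this header first and then walks the descriptors,
 * stopping at the zero-type, zero-length END record:
 *
 *     if (hdr.magic != HVM_FILE_MAGIC || hdr.version != HVM_FILE_VERSION)
 *         reject the image;
 *     for (;;) {
 *         read a struct hvm_save_descriptor d;
 *         if (d.typecode == HVM_SAVE_CODE(END))
 *             break;
 *         read d.length bytes and demux on d.typecode / d.instance;
 *     }
 *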
*/ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint32_t sysenter_cs; uint32_t padding0; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. */ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. 
*/ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { /* A 4k page of register state */ uint8_t data[0x400]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { DECLARE_BITMAP(i, 32*4); uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { DECLARE_BITMAP(i, 16); uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. */ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * 
Largest type-code in use */ #define HVM_SAVE_CODE_MAX 13 /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/hvm/vmx_assist.h000066400000000000000000000071151314037446600272210ustar00rootroot00000000000000/* * vmx_assist.h: Context definitions for the VMXASSIST world switch. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Leendert van Doorn, leendert@watson.ibm.com * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _VMX_ASSIST_H_ #define _VMX_ASSIST_H_ #define VMXASSIST_BASE 0xD0000 #define VMXASSIST_MAGIC 0x17101966 #define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8) #define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12) #define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4) #ifndef __ASSEMBLY__ union vmcs_arbytes { struct arbyte_fields { unsigned int seg_type : 4, s : 1, dpl : 2, p : 1, reserved0 : 4, avl : 1, reserved1 : 1, default_ops_size: 1, g : 1, null_bit : 1, reserved2 : 15; } fields; unsigned int bytes; }; /* * World switch state */ struct vmx_assist_context { uint32_t eip; /* execution pointer */ uint32_t esp; /* stack pointer */ uint32_t eflags; /* flags register */ uint32_t cr0; uint32_t cr3; /* page table directory */ uint32_t cr4; uint32_t idtr_limit; /* idt */ uint32_t idtr_base; uint32_t gdtr_limit; /* gdt */ uint32_t gdtr_base; uint32_t cs_sel; /* cs selector */ uint32_t cs_limit; uint32_t cs_base; union vmcs_arbytes cs_arbytes; uint32_t ds_sel; /* ds selector */ uint32_t ds_limit; uint32_t ds_base; union vmcs_arbytes ds_arbytes; uint32_t es_sel; /* es selector */ uint32_t es_limit; uint32_t es_base; union vmcs_arbytes es_arbytes; uint32_t ss_sel; /* ss selector */ uint32_t ss_limit; uint32_t ss_base; union vmcs_arbytes ss_arbytes; uint32_t fs_sel; /* fs selector */ uint32_t fs_limit; uint32_t fs_base; union vmcs_arbytes fs_arbytes; uint32_t gs_sel; /* gs selector */ uint32_t gs_limit; uint32_t gs_base; union vmcs_arbytes gs_arbytes; uint32_t tr_sel; /* task selector */ uint32_t tr_limit; uint32_t tr_base; union vmcs_arbytes tr_arbytes; uint32_t ldtr_sel; /* ldtr selector */ uint32_t ldtr_limit; uint32_t ldtr_base; union vmcs_arbytes ldtr_arbytes; }; typedef struct vmx_assist_context vmx_assist_context_t; #endif /* __ASSEMBLY__ */ #endif /* _VMX_ASSIST_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * 
tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/000077500000000000000000000000001314037446600244615ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/blkif.h000066400000000000000000000112651314037446600257260ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature_barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). 
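 *
 * Illustrative sketch (not part of this interface): a frontend issuing a
 * one-segment, whole-page read might fill a request roughly as follows;
 * the ring variable and grant reference are assumed to exist already:
 *
 *     req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt++);
 *     req->operation     = BLKIF_OP_READ;
 *     req->handle        = vdevice;
 *     req->id            = my_request_id;
 *     req->sector_number = start_sector;
 *     req->nr_segments   = 1;
 *     req->seg[0].gref       = gref_of_buffer_page;
 *     req->seg[0].first_sect = 0;
 *     req->seg[0].last_sect  = 7;   (8 x 512-byte sectors = one 4KiB page)
 *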
*/ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/console.h000066400000000000000000000033341314037446600262770ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/fbif.h000066400000000000000000000102211314037446600255340ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. * No in events currently defined. 
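 *
 * Illustrative sketch (not part of this interface): even though no in event
 * types are defined yet, a frontend normally drains the in ring so that it
 * can never fill; memory barriers are omitted for brevity:
 *
 *     while (page->in_cons != page->in_prod) {
 *         union xenfb_in_event *ev = &XENFB_IN_RING_REF(page, page->in_cons);
 *         ignore *ev (unknown in events must be ignored);
 *         page->in_cons++;
 *     }
 *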
*/ #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4, that's 4 Megs. Two directory * pages should be enough for a while. */ unsigned long pd[2]; }; /* * Wart: xenkbd needs to know resolution. Put it here until a better * solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/kbdif.h000066400000000000000000000074421314037446600257200ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. 
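 *
 * Illustrative sketch (not part of this interface) of a frontend consuming
 * events; memory barriers and wakeup handling are omitted for brevity:
 *
 *     while (page->in_cons != page->in_prod) {
 *         union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, page->in_cons);
 *         switch (ev->type) {
 *         case XENKBD_TYPE_MOTION: handle ev->motion; break;
 *         case XENKBD_TYPE_KEY:    handle ev->key;    break;
 *         case XENKBD_TYPE_POS:    handle ev->pos;    break;
 *         default: break;   (ignore unknown event types)
 *         }
 *         page->in_cons++;
 *     }
 *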
*/ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/netif.h000066400000000000000000000145051314037446600257440ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MAX (2) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; uint16_t pad[3]; } u; }; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. 
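 *
 * Illustrative note (not part of this interface): frontends usually keep
 * the rx ring topped up with one such request per granted receive page,
 * e.g.
 *
 *     req = RING_GET_REQUEST(&rx_ring, rx_ring.req_prod_pvt++);
 *     req->id   = slot_index;
 *     req->gref = gref_of_receive_page;
 *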
*/ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). */ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/pciif.h000066400000000000000000000051331314037446600257260ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCI_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; }; struct xen_pci_sharedinfo { /* flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/protocols.h000066400000000000000000000011751314037446600266620ustar00rootroot00000000000000#ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #elif defined(__powerpc64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/ring.h000066400000000000000000000345621314037446600256030ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. 
*/ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t pad[48]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. 
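 *
 * Putting the pieces together, a front end's submit path often looks
 * roughly like this (illustrative sketch only; the notification call is
 * whatever the OS provides, e.g. notify_remote_via_irq() on Linux):
 *
 *     if (!RING_FULL(&front_ring)) {
 *         req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt++);
 *         fill in *req;
 *         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *         if (notify)
 *             notify the back end over the event channel;
 *     }
 *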
*/ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. 
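 *
 *  For example, the back end's response path might end with (illustrative):
 *
 *      int notify;
 *      RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&back_ring, notify);
 *      if (notify)
 *          send an event-channel notification to the front end;
 *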
* * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. */ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ mb(); /* front sees new responses /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/tpmif.h000066400000000000000000000046251314037446600257600ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/xenbus.h000066400000000000000000000044231314037446600261410ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. 
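 *
 * As a hedged illustration (not mandated by this header), a backend
 * watch handler often reacts to the peer reaching this state by tearing
 * down the shared resources and then advancing its own state, roughly:
 *
 *     if (frontend_state == XenbusStateClosing) {
 *         disconnect_rings_and_event_channel();    (placeholder helper)
 *         set_own_state(XenbusStateClosed);        (placeholder helper)
 *     }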
*/ XenbusStateClosing = 5, XenbusStateClosed = 6 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/io/xs_wire.h000066400000000000000000000063141314037446600263160ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } static struct xsd_errors xsd_errors[] __attribute__((unused)) = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. 
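 *
 * A hedged sketch of draining reply bytes (assumptions, not part of this
 * header: intf points at this structure, and the caller issues the memory
 * barriers its platform requires around the index updates):
 *
 *     while (intf->rsp_cons != intf->rsp_prod) {
 *         char c = intf->rsp[MASK_XENSTORE_IDX(intf->rsp_cons)];
 *         consume_byte(c);                         (hypothetical helper)
 *         intf->rsp_cons++;
 *     }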
*/ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/kexec.h000066400000000000000000000107261314037446600253300ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. * * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. */ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. 
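 *
 * A hedged sketch of issuing this operation, assuming HYPERVISOR_kexec_op()
 * is the guest kernel's wrapper for the kexec_op hypercall (it is not
 * declared in this header):
 *
 *     xen_kexec_exec_t exec = { .type = KEXEC_TYPE_CRASH };
 *     ret = HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &exec);
 *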
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... [in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/libelf.h000066400000000000000000000151131314037446600254610ustar00rootroot00000000000000#ifndef __XC_LIBELF__ #define __XC_LIBELF__ 1 #if defined(__i386__) || defined(__x86_64) || defined(__ia64__) #define XEN_ELF_LITTLE_ENDIAN #elif defined(__powerpc__) #define XEN_ELF_BIG_ENDIAN #else #error define architectural endianness #endif #undef ELFSIZE #include "elfnote.h" #include "elfstructs.h" #include "features.h" /* ------------------------------------------------------------------------ */ typedef union { Elf32_Ehdr e32; Elf64_Ehdr e64; } elf_ehdr; typedef union { Elf32_Phdr e32; Elf64_Phdr e64; } elf_phdr; typedef union { Elf32_Shdr e32; Elf64_Shdr e64; } elf_shdr; typedef union { Elf32_Sym e32; Elf64_Sym e64; } elf_sym; typedef union { Elf32_Rel e32; Elf64_Rel e64; } elf_rel; typedef union { Elf32_Rela e32; Elf64_Rela e64; } elf_rela; typedef union { Elf32_Note e32; Elf64_Note e64; } elf_note; struct elf_binary { /* elf binary */ const char *image; size_t size; char class; char data; const elf_ehdr *ehdr; const char *sec_strtab; const elf_shdr *sym_tab; const char *sym_strtab; /* loaded to */ char *dest; uint64_t pstart; uint64_t pend; uint64_t reloc_offset; #ifndef __XEN__ /* misc */ FILE *log; #endif int verbose; }; /* ------------------------------------------------------------------------ */ /* accessing elf header fields */ #ifdef XEN_ELF_BIG_ENDIAN # define NATIVE_ELFDATA ELFDATA2MSB #else # define NATIVE_ELFDATA ELFDATA2LSB #endif #define elf_32bit(elf) (ELFCLASS32 == (elf)->class) #define elf_64bit(elf) (ELFCLASS64 == (elf)->class) #define elf_msb(elf) (ELFDATA2MSB == (elf)->data) #define elf_lsb(elf) (ELFDATA2LSB == (elf)->data) #define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data) #define elf_uval(elf, str, elem) \ ((ELFCLASS64 == (elf)->class) \ ? elf_access_unsigned((elf), (str), \ offsetof(typeof(*(str)),e64.elem), \ sizeof((str)->e64.elem)) \ : elf_access_unsigned((elf), (str), \ offsetof(typeof(*(str)),e32.elem), \ sizeof((str)->e32.elem))) #define elf_sval(elf, str, elem) \ ((ELFCLASS64 == (elf)->class) \ ? 
elf_access_signed((elf), (str), \ offsetof(typeof(*(str)),e64.elem), \ sizeof((str)->e64.elem)) \ : elf_access_signed((elf), (str), \ offsetof(typeof(*(str)),e32.elem), \ sizeof((str)->e32.elem))) #define elf_size(elf, str) \ ((ELFCLASS64 == (elf)->class) \ ? sizeof((str)->e64) \ : sizeof((str)->e32)) uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr, uint64_t offset, size_t size); int64_t elf_access_signed(struct elf_binary *elf, const void *ptr, uint64_t offset, size_t size); uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr); /* ------------------------------------------------------------------------ */ /* xc_libelf_tools.c */ int elf_shdr_count(struct elf_binary *elf); int elf_phdr_count(struct elf_binary *elf); const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name); const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index); const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index); const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr); const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr); const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol); const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index); const char *elf_note_name(struct elf_binary *elf, const elf_note * note); const void *elf_note_desc(struct elf_binary *elf, const elf_note * note); uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note); const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note); int elf_is_elfbinary(const void *image); int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr); /* ------------------------------------------------------------------------ */ /* xc_libelf_loader.c */ int elf_init(struct elf_binary *elf, const char *image, size_t size); #ifdef __XEN__ void elf_set_verbose(struct elf_binary *elf); #else void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose); #endif void elf_parse_binary(struct elf_binary *elf); void elf_load_binary(struct elf_binary *elf); void *elf_get_ptr(struct elf_binary *elf, unsigned long addr); uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol); /* ------------------------------------------------------------------------ */ /* xc_libelf_relocate.c */ int elf_reloc(struct elf_binary *elf); /* ------------------------------------------------------------------------ */ /* xc_libelf_dominfo.c */ #define UNSET_ADDR ((uint64_t)-1) enum xen_elfnote_type { XEN_ENT_NONE = 0, XEN_ENT_LONG = 1, XEN_ENT_STR = 2 }; struct xen_elfnote { enum xen_elfnote_type type; const char *name; union { const char *str; uint64_t num; } data; }; struct elf_dom_parms { /* raw */ const char *guest_info; const void *elf_note_start; const void *elf_note_end; struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1]; /* parsed */ char guest_os[16]; char guest_ver[16]; char xen_ver[16]; char loader[16]; int pae; int bsd_symtab; uint64_t virt_base; uint64_t virt_entry; uint64_t virt_hypercall; uint64_t virt_hv_start_low; uint64_t elf_paddr_offset; uint32_t f_supported[XENFEAT_NR_SUBMAPS]; uint32_t f_required[XENFEAT_NR_SUBMAPS]; /* calculated */ uint64_t virt_offset; uint64_t virt_kstart; uint64_t virt_kend; }; static inline void elf_xen_feature_set(int nr, 
uint32_t * addr) { addr[nr >> 5] |= 1 << (nr & 31); } static inline int elf_xen_feature_get(int nr, uint32_t * addr) { return !!(addr[nr >> 5] & (1 << (nr & 31))); } int elf_xen_parse_features(const char *features, uint32_t *supported, uint32_t *required); int elf_xen_parse_note(struct elf_binary *elf, struct elf_dom_parms *parms, const elf_note *note); int elf_xen_parse_guest_info(struct elf_binary *elf, struct elf_dom_parms *parms); int elf_xen_parse(struct elf_binary *elf, struct elf_dom_parms *parms); #endif /* __XC_LIBELF__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/memory.h000066400000000000000000000230441314037446600255360ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; /* * Maximum # bits addressable by the user of the allocated region (e.g., * I/O devices often have a 32-bit limitation even in 64-bit systems). If * zero then the user has no addressing restriction. * This field is not used by XENMEM_decrease_reservation. */ unsigned int address_bits; /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. 
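 *
 * A hedged sketch of the call itself (the field details follow below;
 * HYPERVISOR_memory_op() is assumed to be the guest's wrapper for the
 * memory_op hypercall and is not declared in this header):
 *
 *     struct xen_memory_exchange exch;
 *     ... fill in exch.in and exch.out, and set exch.nr_exchanged = 0 ...
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &exch);
 *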
* Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. 
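 *
 * A hedged lookup sketch, assuming this structure has been filled in via
 * XENMEM_machphys_mapping and the table is mapped at mapping.v_start:
 *
 *     if (mfn <= mapping.max_mfn)
 *         pfn = ((xen_pfn_t *)mapping.v_start)[mfn];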
*/ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /* * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error * code on failure. This call only works for auto-translated guests. */ #define XENMEM_translate_gpfn_list 8 struct xen_translate_gpfn_list { /* Which domain to translate for? */ domid_t domid; /* Length of list. */ xen_ulong_t nr_gpfns; /* List of GPFNs to translate. */ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list; /* * Output list to contain MFN translations. May be the same as the input * list (in which case each input GPFN is overwritten with the output MFN). */ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list; }; typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t); /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/nmi.h000066400000000000000000000052641314037446600250150ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/physdev.h000066400000000000000000000125431314037446600257120ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ XEN_GUEST_HANDLE_00030205(uint8_t) bitmap; uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
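 *
 * As a hedged usage sketch with the modern names (HYPERVISOR_physdev_op()
 * is assumed to be the guest's wrapper for the physdev_op hypercall; it is
 * not declared in this header), signalling end-of-interrupt looks roughly
 * like:
 *
 *     struct physdev_eoi eoi = { .irq = irq };
 *     rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);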
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/platform.h000066400000000000000000000114401314037446600260470ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. 
*/ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(void) data; /* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/sched.h000066400000000000000000000104321314037446600253110ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. 
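 *
 * A hedged sketch of the common shutdown case, assuming HYPERVISOR_sched_op()
 * is the guest's wrapper for this hypercall (it is not declared in this
 * header):
 *
 *     struct sched_shutdown arg = { .reason = SHUTDOWN_poweroff };
 *     HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);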
* * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/sysctl.h000066400000000000000000000142541314037446600255520ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000003 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN variables. */ uint32_t clear; /* Non-zero -> clear after reading. */ XEN_GUEST_HANDLE_64(char) buffer; /* Buffer start */ /* IN/OUT variables. */ uint32_t count; /* In: Buffer size; Out: Used buffer size */ }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t sockets_per_node; uint32_t nr_nodes; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. 
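 *
 * A hedged usage note: because the desc and val handles may be NULL,
 * control tools typically query twice, e.g. (the sysctl-issuing wrapper
 * is assumed, not defined here):
 *
 *     sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
 *     set_xen_guest_handle(sysctl.u.perfc_op.desc, NULL);
 *     set_xen_guest_handle(sysctl.u.perfc_op.val, NULL);
 *     ... issue XEN_SYSCTL_perfc_op, read nr_counters and nr_vals back,
 *         allocate buffers, set the handles, and issue the query again ...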
*/ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* * Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/trace.h000066400000000000000000000116431314037446600253260ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_ALL 0xfffff000 /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_SCHED_DOM_ADD (TRC_SCHED + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED + 2) #define TRC_SCHED_SLEEP (TRC_SCHED + 3) #define TRC_SCHED_WAKE (TRC_SCHED + 4) #define TRC_SCHED_YIELD (TRC_SCHED + 5) #define TRC_SCHED_BLOCK (TRC_SCHED + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7) #define TRC_SCHED_CTL (TRC_SCHED + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED + 9) #define TRC_SCHED_SWITCH (TRC_SCHED + 10) #define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) /* This structure represents a single trace buffer record. */ struct t_rec { uint64_t cycles; /* cycle counter timestamp */ uint32_t event; /* event ID */ unsigned long data[5]; /* event data items */ }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { uint32_t cons; /* Next item to be consumed by control tools. */ uint32_t prod; /* Next item to be produced by Xen. */ /* 'nr_recs' records follow immediately after the meta-data header. 
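 *
 * A hedged sketch of how a consumer might address record i, assuming the
 * buffer is mapped locally at buf and nr_recs is known from the trace
 * setup (neither is defined by this header):
 *
 *     struct t_rec *rec = (struct t_rec *)(buf + 1) + (i % nr_recs);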
*/ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/vcpu.h000066400000000000000000000165421314037446600252100ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. 
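 *
 * A hedged example of consuming this array: time lost to the scheduler
 * ("steal time") is commonly derived, using the RUNSTATE_ indices defined
 * below, as
 *
 *     steal_ns = runstate.time[RUNSTATE_runnable]
 *              + runstate.time[RUNSTATE_offline];
 *
 * where runstate was obtained via VCPUOP_get_runstate_info above.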
*/ uint64_t time[4]; }; typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * If the specified mfn is INVALID_MFN, then it reverts to using the * vcpu_info structure in the shared_info page. 
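 *
 * Illustrative sketch (not part of the original header), assuming 'mfn' and
 * 'offs' locate a suitably allocated per-CPU structure and the usual
 * HYPERVISOR_vcpu_op() wrapper is available:
 *
 *   struct vcpu_register_vcpu_info reg = { .mfn = mfn, .offset = offs };
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &reg);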
*/ #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ struct vcpu_register_vcpu_info { xen_pfn_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ }; typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); #endif /* __XEN_PUBLIC_VCPU_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/version.h000066400000000000000000000057531314037446600257220ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. 
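 *
 * General usage sketch for this header (not part of the original), assuming
 * the usual HYPERVISOR_xen_version() wrapper is available:
 *
 *   xen_extraversion_t extra;
 *   int ver = HYPERVISOR_xen_version(XENVER_version, NULL);
 *   HYPERVISOR_xen_version(XENVER_extraversion, extra);
 *   printk("Xen %d.%d%s\n", ver >> 16, ver & 0xffff, extra);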
*/ #define XENVER_guest_handle 8 #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/xen-compat.h000066400000000000000000000042031314037446600262750ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030205 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif /* Fields defined as a Xen guest handle since 0x00030205. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 #define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type) #else #define XEN_GUEST_HANDLE_00030205(type) type * #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/xen.h000066400000000000000000000566661314037446600250400ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #elif defined(__powerpc__) #include "arch-powerpc.h" #else #error "Unsupported architecture" #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_acm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. 
*/ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. 
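 *
 * Illustrative sketch (not part of the original comment): flushing the local
 * TLB with a single extended operation, assuming the usual
 * HYPERVISOR_mmuext_op() wrapper is available:
 *
 *   struct mmuext_op op = { .cmd = MMUEXT_TLB_FLUSH_LOCAL };
 *   rc = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);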
*/ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ XEN_GUEST_HANDLE_00030205(void) vcpumask; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. 
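 *
 * Illustrative sketch (not part of the original comment): a single checked
 * PTE write, assuming 'pte_maddr' is the machine address of the PTE,
 * 'new_pte' is the value to install, and the usual HYPERVISOR_mmu_update()
 * wrapper is available:
 *
 *   struct mmu_update u = { .ptr = pte_maddr | MMU_NORMAL_PT_UPDATE,
 *                           .val = new_pte };
 *   rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);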
*/ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. 
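 *
 * Illustrative sketch (not part of the original comment): reading a
 * consistent snapshot from the vcpu_time_info defined above, in the seqlock
 * style its comment describes ('t' points at a struct vcpu_time_info;
 * rdtsc() and rmb() are assumed available; the negative tsc_shift case is
 * ignored for brevity):
 *
 *   uint32_t ver;
 *   uint64_t now;
 *   do {
 *       ver = t->version;
 *       rmb();
 *       now = t->system_time +
 *             ((((rdtsc() - t->tsc_timestamp) << t->tsc_shift)
 *               * t->tsc_to_system_mul) >> 32);
 *       rmb();
 *   } while ((ver & 1) || (ver != t->version));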
*/ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". 
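 *
 * (The full form of this magic string is "xen-<version>-<platform>", e.g.
 *  "xen-3.0-x86_32p"; guests typically sanity-check only the "xen-" prefix.)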
*/ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; } vesa_lfb; } u; } dom0_vga_console_info_t; typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) DEFINE_XEN_GUEST_HANDLE(uint8_t); DEFINE_XEN_GUEST_HANDLE(uint16_t); DEFINE_XEN_GUEST_HANDLE(uint32_t); DEFINE_XEN_GUEST_HANDLE(uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600256700ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/interface/xenoprof.h000066400000000000000000000075111314037446600260670ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (C) 2005 Hewlett-Packard Co. * Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). */ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_last_op 14 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/pcifront.h000066400000000000000000000026671314037446600241220ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ struct pcifront_device; struct pci_bus; struct pcifront_sd { int domain; struct pcifront_device *pdev; }; static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, int domain, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } #if defined(CONFIG_PCI_DOMAINS) static inline int pci_domain_nr(struct pci_bus *bus) { struct pcifront_sd *sd = bus->sysdata; return sd->domain; } static inline int pci_proc_domain(struct pci_bus *bus) { return pci_domain_nr(bus); } #endif /* CONFIG_PCI_DOMAINS */ #else /* __ia64__ */ #include #define pcifront_sd pci_controller static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_init_sd(struct pcifront_sd 
*sd, int domain, struct pcifront_device *pdev) { sd->segment = domain; sd->acpi_handle = NULL; sd->iommu = NULL; sd->windows = 0; sd->window = NULL; sd->platform_data = pdev; } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/public/000077500000000000000000000000001314037446600233705ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/public/evtchn.h000066400000000000000000000056351314037446600250410ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. 
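 *
 * General user-space sketch for this interface (not part of the original
 * header; device path and error handling assumed), binding VIRQ 4
 * (VIRQ_TBUF):
 *
 *   int fd = open("/dev/xen/evtchn", O_RDWR);
 *   struct ioctl_evtchn_bind_virq bind = { .virq = 4 };
 *   int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
 *
 * A subsequent read() on the fd returns pending port numbers; writing a
 * port back unmasks it.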
*/ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/public/gntdev.h000066400000000000000000000100661314037446600250330ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. * * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. 
Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. */ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/public/privcmd.h000066400000000000000000000052141314037446600252070ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. 
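 *
 * Illustrative user-space sketch (not part of the original header; path and
 * error handling assumed). It issues the xen_version hypercall
 * (op 17 == __HYPERVISOR_xen_version, cmd XENVER_version == 0, arg == NULL):
 *
 *   privcmd_hypercall_t call = { .op = 17, .arg = { 0, 0 } };
 *   int fd  = open("/proc/xen/privcmd", O_RDWR);
 *   long rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * On success, rc holds the value returned by the hypercall itself.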
*/ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/xen_proc.h000066400000000000000000000004021314037446600240740ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/xenbus.h000066400000000000000000000252741314037446600236010ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); /* See XBWF_ definitions below. */ unsigned long flags; }; /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 /* A xenbus device. 
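 *
 * Illustrative sketch (not part of the original header): a minimal frontend
 * registration against the driver structure declared below ("vexample" and
 * the frontend_* callbacks are hypothetical):
 *
 *   static const struct xenbus_device_id frontend_ids[] = {
 *       { "vexample" },
 *       { "" }
 *   };
 *   static struct xenbus_driver frontend_driver = {
 *       .name             = "vexample",
 *       .owner            = THIS_MODULE,
 *       .ids              = frontend_ids,
 *       .probe            = frontend_probe,
 *       .otherend_changed = frontend_otherend_changed,
 *       .remove           = frontend_remove,
 *   };
 *   rc = xenbus_register_frontend(&frontend_driver);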
*/ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int xenbus_register_frontend(struct xenbus_driver *drv); int xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. 
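 *
 * General usage sketch for the transaction API above (not part of the
 * original header): publishing two example keys and retrying on -EAGAIN
 * ('ring_ref' and 'evtchn' are assumed to exist):
 *
 *   struct xenbus_transaction xbt;
 *   int err;
 * again:
 *   err = xenbus_transaction_start(&xbt);
 *   if (err)
 *       return err;
 *   err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
 *   if (!err)
 *       err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", evtchn);
 *   if (err) {
 *       xenbus_transaction_end(xbt, 1);
 *       return err;
 *   }
 *   err = xenbus_transaction_end(xbt, 0);
 *   if (err == -EAGAIN)
 *       goto again;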
*/ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. 
On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/xencons.h000066400000000000000000000007771314037446600237530ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *info); void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. */ void xencons_rx(char *buf, unsigned len, struct pt_regs *regs); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); void xencons_early_setup(void); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/include/xen/xenoprof.h000066400000000000000000000025651314037446600241330ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/version.ini000066400000000000000000000000641314037446600220630ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/000077500000000000000000000000001314037446600221135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/2.6.9-89/000077500000000000000000000000001314037446600230255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/2.6.9-89/balloon.c000066400000000000000000000412501314037446600246210ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_XEN_PV_ON_HVM #include #include #endif #include #include #include #include #ifdef CONFIG_XEN_PV_ON_HVM #include #endif #include #include #include "common.h" /* for pv-on-hvm */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DECLARE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. 
* Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); /* We aim for 'current allocation' == 'target allocation'. */ static unsigned long current_pages; static unsigned long target_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; /* We may hit the hard limit in Xen. If we do then we remember it. */ static unsigned long hard_limit; /* * Drivers may alter the memory reservation independently, but they must * inform the balloon driver so that we can avoid hitting the hard limit. */ static unsigned long driver_pages; /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); static unsigned long balloon_low, balloon_high; /* Main work function, always executed in process context. */ static void balloon_process(void *unused); static DECLARE_WORK(balloon_worker, balloon_process, NULL); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); balloon_high++; } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
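 *
 * Note on sizing: frame_list[] above deliberately fits in a single page, so
 * every increase/decrease pass is bounded. With 4 KiB pages and 8-byte
 * longs that is 4096 / 8 = 512 frames, i.e. at most 2 MiB of guest memory
 * changes hands per hypercall batch (1024 frames, 4 MiB, on 32-bit).
 * Ballooned pages carry no data of their own; they are merely threaded onto
 * ballooned_pages through page->lru by the PAGE_TO_LIST()/UNLIST_PAGE()
 * macros above, so tracking them costs no extra memory.
 *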
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) balloon_high--; else balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(target_pages, hard_limit); if (target > (current_pages + balloon_low + balloon_high)) target = current_pages + balloon_low + balloon_high; return target; } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op( XENMEM_populate_physmap, &reservation); if (rc < nr_pages) { int ret; /* We hit the Xen hard limit: reprobe. */ set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = rc; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != rc); hard_limit = current_pages + rc - driver_pages; goto out; } for (i = 0; i < nr_pages; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); /* Update P->M and M->P tables. */ set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN xen_machphys_update(frame_list[i], pfn); /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); set_page_count(page, 1); __free_page(page); } current_pages += nr_pages; totalram_pages = current_pages; out: balloon_unlock(flags); return 0; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. 
*/ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); current_pages -= nr_pages; totalram_pages = current_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static void balloon_process(void *unused) { int need_sleep = 0; long credit; char buffer[16]; down(&balloon_mutex); xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != current_pages) { mod_timer(&balloon_timer, jiffies + HZ); sprintf(buffer,"%lu",current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); up(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ static void set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ hard_limit = ~0UL; target_pages = target; schedule_work(&balloon_worker); } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
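 *
 * Worked example: with 4 KiB pages PAGE_SHIFT is 12, so the shift below is
 * 12 - 10 = 2. A memory/target value of 1048576 (1 GiB expressed in KiB)
 * becomes 1048576 >> 2 = 262144 pages, and 262144 pages of 4 KiB are again
 * exactly 1 GiB.
 *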
*/ set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(current_pages), PAGES2KB(target_pages), PAGES2KB(balloon_low), PAGES2KB(balloon_high), PAGES2KB(driver_pages)); if (hard_limit != ~0UL) len += sprintf(page + len, "%8lu kB\n", PAGES2KB(hard_limit)); else len += sprintf(page + len, " ??? kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if defined(CONFIG_X86) && defined(CONFIG_XEN) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = current_pages; #else current_pages = totalram_pages; #endif target_pages = current_pages; balloon_low = 0; balloon_high = 0; driver_pages = 0UL; hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; /* don't include on bare-metal & FV guest */ #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN) if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
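 *
 * "Excess" here is the gap between what the domain was actually given at
 * boot (xen_start_info->nr_pages) and its maximum (max_pfn). For example, a
 * guest configured with roughly memory=512M and maxmem=1024M starts with
 * the top 512 MiB worth of pfns already on the ballooned list, ready to be
 * claimed back by increase_reservation() when the target is later raised.
 *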
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long vaddr, flags; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL); if (page == NULL) goto err; vaddr = (unsigned long)page_address(page); scrub_pages(vaddr, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
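 *
 * (For context: alloc_empty_pages_and_pagevec() hands its callers, commonly
 * backend drivers, page-frame slots whose machine frames have been returned
 * to Xen so that foreign grants can later be mapped into them. An
 * auto-translated guest just decreases the reservation on the gmfn above; a
 * non-auto-translated guest needs dealloc_pte_fn(), which is only built
 * under CONFIG_XEN, hence the error fallback below when that support is
 * missing.)
 *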
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); __free_page(page); goto err; } totalram_pages = --current_pages; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/Kbuild000066400000000000000000000002551314037446600232520ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/Makefile000066400000000000000000000000661314037446600235550ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/balloon.c000066400000000000000000000465451314037446600237230ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ /*modified by linyuliang 00176349 begin */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process,NULL); /*modified by linyuliang 00176349 end */ static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) bs.balloon_high--; else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(bs.target_pages, bs.hard_limit); if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } /*added by linyuliang 00176349 begin */ unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } /*added by linyuliang 00176349 end */ static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; /*modified by linyuliang 00176349 begin */ /*when xen has less pages than nr_pages,don not give it back! */ rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); page_zone(page)->present_pages++; init_page_count(page); __free_page(page); } bs.current_pages += rc; /*modified by linyuliang 00176349 end */ totalram_pages = bs.current_pages - totalram_bias; out: balloon_unlock(flags); /*modified by linyuliang 00176349 begin */ //return 0; return rc < 0 ? 
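/*
 * Worked example for balloon_minimum_target() above: a guest whose max_pfn
 * corresponds to 1024 MiB falls into the 512..2048 MiB band, so its floor
 * is MB2PAGES(104) + (max_pfn >> 3), i.e. 104 MiB + 128 MiB = 232 MiB,
 * which lies on the straight line between the 512 MiB -> 168 MiB and
 * 2048 MiB -> 360 MiB points in the table. The final
 * min(min_pages, curr_pages) only limits how far the balloon may shrink the
 * guest; it never forces it to grow.
 */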
rc : rc != nr_pages; /*modified by linyuliang 00176349 end */ } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn)); page = pfn_to_page(pfn); SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 
0:1; // IPRINTK("b_set_new_tar_bs.target_pages= %lu",bs.target_pages); schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); if (bs.hard_limit != ~0UL) len += sprintf(page + len, "%8lu kB\n", PAGES2KB(bs.hard_limit)); else len += sprintf(page + len, " ??? kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? 
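/*
 * The inner hypercall is only a probe: if the hypervisor recognises
 * XENMEM_get_pod_target (any result other than -ENOSYS), the guest may be
 * populate-on-demand, so the outer call asks for the maximum reservation;
 * otherwise it falls back to the current reservation. The page count that
 * comes back becomes bs.current_pages, and its difference to the kernel's
 * own totalram_pages is remembered in totalram_bias so that later balloon
 * updates of totalram_pages stay consistent with what the kernel counted
 * at boot.
 */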
XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; bs.hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void balloon_exit(void) { /* XXX - release balloon here */ return; } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long vaddr, flags; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL); if (page == NULL) goto err; vaddr = (unsigned long)page_address(page); scrub_pages(vaddr, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); __free_page(page); goto err; } totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/common.h000066400000000000000000000044621314037446600235620ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* We may hit the hard limit in Xen. If we do then we remember it. */ unsigned long hard_limit; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. 
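 *
 * The fields of balloon_stats are what balloon.c reports through
 * /proc/xen/balloon (and, where enabled, the sysfs attributes). A
 * hypothetical guest with 512 MiB and nothing ballooned out would read
 * back roughly:
 *
 *      Current allocation:   524288 kB
 *      Requested target:     524288 kB
 *      Low-mem balloon:           0 kB
 *      High-mem balloon:          0 kB
 *      Driver pages:              0 kB
 *      Xen hard limit:          ??? kB
 *
 * Writing a size to the same file requests a new target; the value is
 * parsed with memparse(), so suffixes work, e.g.
 * "echo 512M > /proc/xen/balloon".
 *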
*/ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/other/000077500000000000000000000000001314037446600232345ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/other/balloon.c000066400000000000000000000461311314037446600250330ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. 
*/ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ /*modified by linyuliang 00176349 begin */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process,NULL); /*modified by linyuliang 00176349 end */ static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) bs.balloon_high--; else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(bs.target_pages, bs.hard_limit); if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } /*added by linyuliang 00176349 begin */ unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } /*added by linyuliang 00176349 end */ static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > 
ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; /*modified by linyuliang 00176349 begin */ /*when xen has less pages than nr_pages,don not give it back! */ rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); page_zone(page)->present_pages++; init_page_count(page); __free_page(page); } bs.current_pages += rc; /*modified by linyuliang 00176349 end */ totalram_pages = bs.current_pages - totalram_bias; out: balloon_unlock(flags); /*modified by linyuliang 00176349 begin */ //return 0; return rc < 0 ? rc : rc != nr_pages; /*modified by linyuliang 00176349 end */ } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn)); page = pfn_to_page(pfn); SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! 
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; // IPRINTK("b_set_new_tar_bs.target_pages= %lu",bs.target_pages); schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); if (bs.hard_limit != ~0UL) len += sprintf(page + len, "%8lu kB\n", PAGES2KB(bs.hard_limit)); else len += sprintf(page + len, " ??? 
kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; bs.hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void balloon_exit(void) { /* XXX - release balloon here */ return; } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long vaddr, flags; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL); if (page == NULL) goto err; vaddr = (unsigned long)page_address(page); scrub_pages(vaddr, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) 
ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. */ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); __free_page(page); goto err; } totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-balloon/sysfs.c000066400000000000000000000114701314037446600234310ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /* * sysdev_{create,remove}_file isn't in rhel5 kabi. * just disable it all for now -- kraxel */ #if 0 #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(hard_limit_kb, (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n", (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, const char *buf, size_t count) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strcpy(memstring, buf); target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_hard_limit_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { set_kset_name(BALLOON_CLASS_NAME), }; static struct sys_device balloon_sysdev; static int register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } #endif int balloon_sysfs_init(void) { // return register_balloon(&balloon_sysdev); return 0; } void balloon_sysfs_exit(void) { // unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/000077500000000000000000000000001314037446600230625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600242210ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs 
+= xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/Makefile000066400000000000000000000000661314037446600245240ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/evtchn.c000066400000000000000000000204421314037446600245170ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irqreturn_t(*handler) (int, void *, struct pt_regs *); void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { unsigned int cpu; shared_info_t *s = shared_info_area; vcpu_info_t *vcpu_info; cpu = get_cpu(); vcpu_info = &s->vcpu_info[cpu]; /* Slow path (hypercall) if this is a non-local port. We only ever bind event channels to vcpu 0 in HVM guests. */ if (unlikely(cpu != 0)) { evtchn_unmask_t op = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op); put_cpu(); return; } synch_clear_bit(port, &s->evtchn_mask[0]); /* * The following is basically the equivalent of * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the * interrupt edge' if the channel is masked. */ if (synch_test_bit(port, &s->evtchn_pending[0]) && !synch_test_and_set_bit(port / BITS_PER_LONG, &vcpu_info->evtchn_pending_sel)) { vcpu_info->evtchn_upcall_pending = 1; if (!vcpu_info->evtchn_upcall_mask) force_evtchn_callback(); } put_cpu(); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } } 
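	/*
	 * Clear the handler while the per-irq lock is still held; after the
	 * unlock below we spin until any invocation already running on
	 * another CPU drains (in_handler drops to 0) before free_xen_irq()
	 * recycles the slot.
	 */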
irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) static irqreturn_t evtchn_interrupt(int irq, void *dev_id) #else static irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs) #endif { unsigned int l1i, port; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irqreturn_t(*handler) (int, void *, struct pt_regs *); shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; /* NB. No need for a barrier here -- XCHG is a barrier on x86. */ l1 = xchg(&v->evtchn_pending_sel, 0); while (l1 != 0) { l1i = __ffs(l1); l1 &= ~(1 << l1i); while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i])) { port = (l1i * BITS_PER_LONG) + __ffs(l2); synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, NULL); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); } } return IRQ_HANDLED; } void force_evtchn_callback(void) { (void)HYPERVISOR_xen_version(0, NULL); } EXPORT_SYMBOL(force_evtchn_callback); void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/features.c000066400000000000000000000014641314037446600250510ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */ EXPORT_SYMBOL(xen_features); void setup_xen_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. 
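 * Those entries belong to the toolstack, so the free list built in
 * gnttab_init() starts handing out references at NR_RESERVED_ENTRIES.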
*/ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0); } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_ALERT "WARNING: g.e. 
still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { if (gnttab_end_foreign_access_ref(ref, readonly)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_WARNING "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. */ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { 
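	/*
	 * Queue 'callback' so that fn(arg) runs once at least 'count' grant
	 * references are free again; if callback->next is already set the
	 * callback is assumed to be queued and is left untouched.
	 *
	 * Illustrative use only -- 'info' and restart_queue_fn are
	 * hypothetical caller names:
	 *
	 *	gnttab_request_free_callback(&info->callback,
	 *				     restart_queue_fn, info, 16);
	 */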
unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->next) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * GREFS_PER_GRANT_FRAME; for (i = nr_grant_frames; i < new_nr_grant_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_grant_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN #ifndef __ia64__ static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); #ifndef __ia64__ if (shared == NULL) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); shared = area->addr; } rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #else shared = __va(frames[0] << PAGE_SHIFT); #endif kfree(frames); return 0; } int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return 
gnttab_map(0, nr_grant_frames - 1); } int gnttab_suspend(void) { #ifndef __ia64__ apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); #endif return 0; } #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / GREFS_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = (boot_max_nr_grant_frames * GREFS_PER_GRANT_FRAME / (PAGE_SIZE / sizeof(grant_ref_t))); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; for (i = 0; i < nr_grant_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/machine_reboot.c000066400000000000000000000042701314037446600262070ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
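 * Each AP therefore parks in ap_suspend() with interrupts disabled,
 * bumping nr_spinning on entry and leaving only once the boot CPU clears
 * do_spin after the suspend hypercall has completed.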
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume(); } else xenbus_suspend_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/panic-handler.c000066400000000000000000000025311314037446600257340ustar00rootroot00000000000000#include #include #include #include MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
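 * because HYPERVISOR_shutdown(SHUTDOWN_crash) does not return control to
 * the guest; the NOTIFY_DONE below only satisfies the notifier signature.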
*/ return NOTIFY_DONE; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) static struct notifier_block xen_panic_block = { xen_panic_event, NULL, 0 /* try to go last */ }; #else static struct notifier_block xen_panic_block = { .notifier_call= xen_panic_event, .next= NULL, .priority= 0/* try to go last */ }; #endif /*LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)*/ static int __init setup_panic_event(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) notifier_chain_register(&panic_notifier_list, &xen_panic_block); #else atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); #endif /*LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)*/ return 0; } int xen_panic_handler_init(void) { return setup_panic_event(); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/platform-compat.c000066400000000000000000000071601314037446600263370ustar00rootroot00000000000000#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if(call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0)<0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } void ctrl_alt_del(void) { xen_reboot_vm(); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #ifdef RHEL42 unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #ifdef RHEL42 /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. 
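 *
 * Returns NULL on allocation failure, exactly like kmalloc().  Supplied by
 * this compatibility layer because the old RHEL4 kernels targeted here lack
 * a native kzalloc().  Illustrative use (struct foo is hypothetical):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;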
*/ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600271400ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. * Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/platform-pci.c000066400000000000000000000225411314037446600256270ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_init(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base = 0, eax = 0, ebx = 0, ecx = 0, edx = 0; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int get_hypercall_stubs(void) { uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0, pages = 0, msr = 0, i = 0, base = 0; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); printk(KERN_INFO "Hypercall area is %u pages.\n", pages); /* Use __vmalloc() because vmalloc_exec() is not an exported symbol. 
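 * The stub pages must be executable, which is why the mapping below clears
 * _PAGE_NX by hand rather than relying on an exec pgprot.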
*/ /* PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. */ /* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ hypercall_stubs = __vmalloc(pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } return 0; } #else /* __ia64__ */ #define get_hypercall_stubs() (0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { #ifdef __ia64__ static const int isa_irq_low = 0x20, isa_irq_high = 0x2f; #else static const int isa_irq_low = 0x00, isa_irq_high = 0x0f; #endif u8 pin; int irq; irq = pdev->irq; if (irq >= isa_irq_low && irq <= isa_irq_high) return irq; /* ISA IRQ */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = get_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret = xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct 
pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; /* do 2 things for PV driver restore on HVM * 1: rebuild share info * 2: set callback irq again */ xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; rc = pci_register_driver(&platform_driver); if (rc < 0) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/platform-pci.h000066400000000000000000000026611314037446600256350ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. */ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/reboot.c000066400000000000000000000155521314037446600245300ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 /* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only * report a crash, not be instructed to crash! * HALT is the same as POWEROFF, as far as we're concerned. The tools use * the distinction when we return the reason code to them. */ #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? 
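 * Populated from the "control/platform-feature-multiprocessor-suspend"
 * xenstore key in setup_shutdown_watcher() and passed on to
 * __xen_suspend() by xen_suspend().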
*/ static int fast_suspend; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) static void __shutdown_handler(struct work_struct *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler); #else static void __shutdown_handler(void *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL); #endif int __xen_suspend(int fast_suspend, void (*resume_notifier)(void)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static void xen_resume_notifier(void) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_work(&shutdown_work); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } static int kthread_create_on_cpu(int (*f)(void *arg), void *arg, const char *name, int cpu) { struct task_struct *p; p = kthread_create(f, arg, name); if (IS_ERR(p)) return PTR_ERR(p); kthread_bind(p, cpu); wake_up_process(p); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) static void __shutdown_handler(struct work_struct *unused) #else static void __shutdown_handler(void *unused) #endif { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, old_state, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
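 * A failed read simply aborts the transaction and returns; the watch on
 * control/shutdown fires again the next time the toolstack writes the key.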
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } /*պǨʱsuspendʧᵼǨƹȥsuspendʧܺǨƳʱ*/ //xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); if (new_state != SHUTDOWN_INVALID) { old_state = xchg(&shutting_down, new_state); if (old_state == SHUTDOWN_INVALID) schedule_work(&shutdown_work); else BUG_ON(old_state != SHUTDOWN_RESUMING); } kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xen_proc.c000066400000000000000000000010451314037446600250430ustar00rootroot00000000000000 #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry); void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xen_support.c000066400000000000000000000040601314037446600256140ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_mini_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_backend_client.c000066400000000000000000000106761314037446600275510ustar00rootroot00000000000000/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
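* The helpers below wrap GNTTABOP_map_grant_ref and GNTTABOP_unmap_grant_ref so a backend can map the
* frontend's shared ring page either into a freshly allocated vm area (xenbus_map_ring_valloc and
* xenbus_unmap_ring_vfree) or at a caller-supplied address (xenbus_map_ring and xenbus_unmap_ring).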
*/ #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_client.c000066400000000000000000000172231314037446600260750ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
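* It provides the common helpers both halves use: registering watches on a path (xenbus_watch_path,
* xenbus_watch_path2), switching the device's state node (xenbus_switch_state), reporting errors
* (xenbus_dev_error, xenbus_dev_fatal), and setting up grant references and event channels for the shared ring.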
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DPRINTK(fmt, args...) \ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_KERNEL, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. 
Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_comms.c000066400000000000000000000142161314037446600257340ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
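* xb_write() and xb_read() below copy request and response bytes through the shared
* xenstore_domain_interface ring, using memory barriers around the index updates, and notify
* the peer over the store event channel after each chunk.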
*/ #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern int xenstored_ready; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) extern void xenbus_probe(struct work_struct *unused); static DECLARE_WORK(probe_work, xenbus_probe); #else extern void xenbus_probe(void *); static DECLARE_WORK(probe_work, xenbus_probe, NULL); #endif static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) { if (unlikely(xenstored_ready == 0)) { xenstored_ready = 1; schedule_work(&probe_work); } wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /* Set up interrupt handler off store event channel. 
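Any stale response-ring contents are discarded (rsp_cons is snapped to rsp_prod) before the event-channel handler is bound.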
*/ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_comms.h000066400000000000000000000035561314037446600257460ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_dev.c000066400000000000000000000231041314037446600253700ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
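* Requests written by user space are forwarded to xenstored via xenbus_dev_request_and_reply();
* replies and watch events are queued on a per-open read_buffers list and returned by read().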
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
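Entries are appended by queue_reply() and drained by xenbus_dev_read().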
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; body_len = path_len + tok_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_TRANSACTION_START: case XS_TRANSACTION_END: case XS_DIRECTORY: case XS_READ: case XS_GET_PERMS: case XS_RELEASE: case XS_GET_DOMAIN_PATH: case XS_WRITE: case XS_LINUX_WEAK_WRITE: case XS_MKDIR: case XS_RM: case XS_SET_PERMS: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { 
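/* Find the transaction this END refers to and drop our local record of it. */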
list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: rc = -EINVAL; break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_probe.c000066400000000000000000000674501314037446600257350ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. 
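* This is the frontend half: it registers the "xen" bus, enumerates the device nodes in xenstore,
* creates a xenbus_device for each, and re-probes when the top-level "device" subtree changes.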
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static ATOMIC_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
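Matching is on the devicetype string only.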
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; int length = 0, i = 0; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, .dev = { .bus_id = "xen", }, }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = NULL; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = drv->owner; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int 
xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
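The nodename and devicetype strings are stored in the same allocation, immediately after the struct xenbus_device itself.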
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.parent = &bus->dev; xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto unregister; return 0; unregister: device_remove_file(&xendev->dev, &dev_attr_nodename); device_remove_file(&xendev->dev, &dev_attr_devtype); device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
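The fe_watch below fires frontend_changed() whenever anything under the top-level "device" directory changes.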
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ int xenstored_ready = 0; int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (xenstored_ready > 0) ret = nb->notifier_call(nb, 0, NULL); else atomic_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { atomic_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(void *unused) { BUG_ON((xenstored_ready <= 0)); /* Enumerate devices in xenstore and watch for changes. 
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ atomic_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = 0; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { xenstored_ready = 1; #ifdef CONFIG_XEN xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif } xenbus_dev_init(); /* Initialize the interface to xenstore. 
*/ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: if (page) free_page(page); /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifdef CONFIG_XEN postcore_initcall(xenbus_probe_init); MODULE_LICENSE("Dual BSD/GPL"); #else int xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_disconnected_device(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_disconnected_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_disconnected_device); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); } else if (xendev->state != XenbusStateConnected) { printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (state %d)\n", xendev->nodename, xendev->state); } return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_disconnected_device(drv)) { //5s if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } //100ms schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_probe.h000066400000000000000000000061171314037446600257330ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
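* It declares struct xen_bus_type and the probe and match helpers shared by the frontend bus
* and the optional backend bus implementation.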
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; struct device dev; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void dev_changed(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_probe_backend.c000066400000000000000000000167621314037446600274040ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) return -ENOSPC; return 0; } static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, }, .dev = { .bus_id = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; struct xenbus_driver *drv; int i = 0; int length = 0; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_BASE_PATH=%s", xenbus_backend.root); /* terminate, set to next free slot, shrink available space */ envp[i] = NULL; envp = &envp[i]; num_envp -= i; buffer = &buffer[length]; buffer_size -= length; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, envp, num_envp, buffer, buffer_size); } return 0; } int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; 
char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-platform-pci/xenbus_xs.c000066400000000000000000000477131314037446600252600ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct rw_semaphore transaction_mutex; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) down_read(&xs_state.transaction_mutex); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) up_read(&xs_state.transaction_mutex); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. 
*/ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len);; if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_KERNEL, "%s", dir); else buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len); /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. 
*/ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; down_read(&xs_state.transaction_mutex); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { up_read(&xs_state.transaction_mutex); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); up_read(&xs_state.transaction_mutex); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. 
*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (current->pid != xenwatch_pid) { mutex_lock(&xenwatch_mutex); mutex_unlock(&xenwatch_mutex); } } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { down_write(&xs_state.transaction_mutex); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.transaction_mutex); /* No need for watches_lock: the watch_mutex is sufficient. 
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); up_write(&xs_state.transaction_mutex); } static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent != &watch_events) { msg = list_entry(ent, struct xs_stored_msg, list); if (msg->u.watch.handle->flags & XBWF_new_thread) kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); else xenwatch_handle_callback(msg); } mutex_unlock(&xenwatch_mutex); } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_KERNEL); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { int err; struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); init_rwsem(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); /* Initialize the shared memory rings to talk to xenstored */ err 
= xb_init_comms(); if (err) return err; task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/000077500000000000000000000000001314037446600212405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/Kbuild000066400000000000000000000001151314037446600223720ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/Makefile000066400000000000000000000000661314037446600227020ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/blkfront.c000066400000000000000000000606341314037446600232360ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static void blkif_restart_queue(struct work_struct *work); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) INIT_WORK(&info->work, blkif_restart_queue); #else INIT_WORK(&info->work, blkif_restart_queue, &info->work); #endif for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. 
We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. 
*/ flush_scheduled_work(); xlvbd_del(info); out: xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free > BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *work) { struct blkfront_info *info = container_of(work, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing) blkfront_closing(dev); } return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; struct bio *bio; struct bio_vec *bvec; int idx; unsigned long id; unsigned int fsect, lsect; int ref; grant_ref_t gref_head; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)req->sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->nr_segments = 0; rq_for_each_bio (bio, req) { bio_for_each_segment (bvec, bio, idx) { BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST); buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT; fsect = bvec->bv_offset >> 9; lsect = fsect + (bvec->bv_len >> 9) - 1; /* install a grant reference. 
*/ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ); info->shadow[id].frame[ring_req->nr_segments] = mfn_to_pfn(buffer_mfn); ring_req->seg[ring_req->nr_segments] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; ring_req->nr_segments++; } } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. 
Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, 0, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir( (struct request *) info->shadow[req->id].request)); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/block.h000066400000000000000000000106661314037446600225140ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. 
* * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); /* Virtual block-device subsystem. 
*/ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vbd/vbd.c000066400000000000000000000244551314037446600221710ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. 
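 * The argument to blk_queue_dma_alignment() below is a mask, so 511
 * (0x1ff) demands 512-byte, i.e. one-sector, alignment; a different
 * sector size would use (sector_size - 1) instead.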
*/ blk_queue_dma_alignment(rq, 511); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk("blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? 
"enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk("blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vnif/000077500000000000000000000000001314037446600214275ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vnif/Kbuild000066400000000000000000000001101314037446600225540ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vnif/Makefile000066400000000000000000000000661314037446600230710ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL4/xen-vnif/netfront.c000066400000000000000000001601051314037446600234350ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. 
*/ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_NO_CSUM_OFFLOAD 1 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_HW)); } #else #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #endif #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; }; struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). 
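 * The netfront_carrier_on/off/ok() macros below therefore toggle and test
 * the driver-private 'carrier' field rather than calling the generic
 * netif_carrier_on/off() helpers.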
*/ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static int send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs); #if defined(CONFIG_SYSFS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include #include static void remove_nic8139() { struct pci_dev *pci_dev_nic8139 = NULL; //do { pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* check kernel version */ } //} while (pci_dev_nic8139); # comment off because fedora12 will crash return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
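 * In this driver the ring setup is actually deferred: netfront_probe()
 * below only allocates and registers the net_device; the shared rings and
 * event channel are created from talk_to_backend() once the backend
 * reaches InitWait (see backend_changed() and network_connect()).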
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; // remove 8139 nic when xen nic devices appear remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } /* Create shared ring, alloc event channel. 
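 * setup_device() grants the tx and rx ring pages to the backend domain
 * and binds an interdomain event channel; the resulting ring references
 * and event-channel port are then advertised to the backend via the
 * xenstore keys written in the transaction below.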
*/ err = setup_device(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } #ifdef HAVE_NO_CSUM_OFFLOAD err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", 1); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } #endif err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } #ifdef HAVE_TSO err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } #endif err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
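 * Handshake as handled below: connect when the backend reaches InitWait
 * (network_connect() plus a switch to Connected), send gratuitous ARPs
 * once the backend itself reports Connected, and close the frontend via
 * xenbus_frontend_closed() when the backend enters Closing.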
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static int send_fake_arp(struct net_device *dev, int arpType) { struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return 0; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return -ENOMEM; return dev_queue_xmit(skb); } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev))) netif_wake_queue(dev); } static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) netif_rx_schedule(dev); } spin_unlock_bh(&np->rx_lock); network_maybe_wake_tx(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. 
Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; netif_rx_schedule(dev); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. 
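 * That is, the balloon driver: pages about to be flipped to the backend
 * leave this domain, so balloon_update_driver_allowance(i) charges them
 * against the driver allowance here, and netif_poll() credits them back
 * when flipped pages are received.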
*/ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ (void)HYPERVISOR_multicall(np->rx_mcl, i+1); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = 
get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? */ tx->flags |= NETTXF_data_validated; #endif #ifdef HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. 
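 * Only the shared response-producer index is peeked here, to decide
 * whether the poll handler needs scheduling; the actual receive work in
 * netif_poll() runs later under rx_lock.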
*/ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) netif_rx_schedule(dev); } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. 
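 * On the flipping path the backend has transferred a machine frame to this
 * domain, so the virtual mapping and the phys-to-machine entry for the pfn
 * backing this fragment are re-established here; the queued multicalls and
 * machine-to-phys updates are flushed later in netif_poll().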
*/ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); /* to avoid build warnings */ #ifndef CONFIG_X86_PAE void *vaddr = page_address(page); #endif mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; /* not for rhel4 & rhel5 -- _supported_pte_mask not exported for pfn_pte_ma */ #ifndef CONFIG_X86_PAE MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); #endif mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #ifdef HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #ifdef HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct net_device *dev, int *pbudget) { struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, budget, more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); if ((budget = *pbudget) > dev->quota) budget = dev->quota; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
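 * The read barrier pairs with the producer-side write barrier in the
 * backend: rsp_prod was sampled once into 'rp' above, and the barrier
 * ensures the response entries up to that index are visible before they
 * are copied out below.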
*/ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } #ifndef CONFIG_IA64 NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; #else /* IA64 */ skb->nh.raw = (void *)skb_shinfo(skb)->frags[0].page; skb->h.raw = skb->nh.raw + rx->offset; #endif len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. 
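 * The va-mapping multicalls queued by xennet_get_responses() already sit
 * in rx_mcl[0 .. pages_flipped-1]; one further entry is appended here so
 * the machine-to-phys fix-ups in rx_mmu[] go out in the same batched
 * hypercall.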
*/ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; (void)HYPERVISOR_multicall(np->rx_mcl, pages_flipped + 1); } } while ((skb = __skb_dequeue(&errq))) kfree_skb(skb); while ((skb = __skb_dequeue(&rxq)) != NULL) { #ifndef CONFIG_IA64 struct page *page = NETFRONT_SKB_CB(skb)->page; unsigned offset = NETFRONT_SKB_CB(skb)->offset; #else /* IA64 */ struct page *page = (struct page *)skb->nh.raw; #endif void *vaddr = page_address(page); #ifndef CONFIG_IA64 memcpy(skb->data, vaddr + offset, skb_headlen(skb)); #else /* IA64 */ memcpy(skb->data, vaddr + (skb->h.raw - skb->nh.raw), skb_headlen(skb)); #endif if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); dev->last_rx = jiffies; } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); *pbudget -= work_done; dev->quota -= work_done; if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) __netif_rx_complete(dev); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return more_to_do; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref( np->grant_tx_ref[i], GNTMAP_readonly); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; if (np->copying_receiver) { WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__); return; } skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. 
*/ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); /* not for rhel4 & rhel5 -- _supported_pte_mask not exported for pfn_pte_ma */ #ifndef CONFIG_X86_PAE void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); #endif mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } IPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } while ((skb = __skb_dequeue(&free_list)) != NULL) dev_kfree_skb(skb); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return &np->stats; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { #ifdef HAVE_TSO if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); #else return -ENOSYS; #endif } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) xennet_set_tso(dev, 1); #endif } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. 
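 * This is exactly what the assignment below computes: copy when rx_copy
 * was requested and the backend advertises feature-rx-copy, or when
 * rx_flip was requested but the backend does not advertise
 * feature-rx-flip.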
*/ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); IPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); netif_release_rx_bufs(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, }; /* recent kernels (f7) have sysfs changes, * netfront isn't adapted yet ... 
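 * Where it is compiled in, the code below exposes rxbuf_min, rxbuf_max and
 * rxbuf_cur attributes on the network class device so the receive refill
 * targets can be read and tuned from userspace, e.g. (illustrative path
 * and interface name only):
 *
 *   echo 128 > /sys/class/net/eth0/rxbuf_min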
*/ #if defined(CONFIG_SYSFS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) static ssize_t show_rxbuf_min(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_target); } static const struct class_device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = class_device_create_file(&netdev->class_dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); } } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. 
*/ static void network_set_multicast_list(struct net_device *dev) { } static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->poll = netif_poll; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->change_mtu = xennet_change_mtu; netdev->weight = 64; netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
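 * Carrier is dropped under both locks before the interrupt handler is
 * unbound and the ring pages are released, so neither the tx nor the rx
 * path can touch the rings once teardown begins; network_connect()
 * rebuilds everything on resume or reconnect.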
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } /* ** Driver registration ** */ static struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .resume = netfront_resume, .otherend_changed = backend_changed, }; MODULE_ALIAS("xen:vif"); static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif if (is_initial_xendomain()) return 0; IPRINTK("Initialising virtual ethernet driver.\n"); (void)register_inetaddr_notifier(¬ifier_inetdev); return xenbus_register_frontend(&netfront); } module_init(netif_init); static void __exit netif_exit(void) { if (is_initial_xendomain()) return; unregister_inetaddr_notifier(¬ifier_inetdev); return xenbus_unregister_driver(&netfront); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/000077500000000000000000000000001314037446600176765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/Makefile000066400000000000000000000031131314037446600213340ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24: Create the file. ###### ###### 2012-12-22: Makefile integrate with config.mk. 
###### ############################################################################### M=$(shell pwd) COMPILEARGS = $(CROSSCOMPILE) RHEL55_VBD = $(shell echo $(KERNDIR) | grep 2.6.18-194) RHEL50_VBD = $(shell echo $(KERNDIR) | grep 2.6.18-8) ZBQL_VBD = $(shell echo $(KERNDIR) | grep 2.6.18-SKL) FEDORA6_VBD = $(shell echo $(KERNDIR) | grep 2.6.18-1.2798) vbd_path=other include $(M)/config.mk obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ obj-m += xen-scsi/ ifneq ($(RHEL55_VBD), ) vbd_path = 2.6.18-194 endif ifneq ($(RHEL50_VBD), ) vbd_path = 2.6.18-8 endif ifneq ($(ZBQL_VBD), ) vbd_path = 2.6.18-8 endif ifneq ($(FEDORA6_VBD), ) vbd_path = 2.6.18-8 endif all:lnfile make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install:lnfile make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) lnfile: @set -e; \ cd xen-vbd; \ for file in `ls $(vbd_path)`; \ do \ ln -sf $(vbd_path)/$$file $$file; \ done; \ cd - clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) rm -rf Modules.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/compat-include/000077500000000000000000000000001314037446600226025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/compat-include/xen/000077500000000000000000000000001314037446600233745ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/compat-include/xen/platform-compat.h000066400000000000000000000126461314037446600266630ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
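 *
 * (Both preprocessor branches below end up defining the same
 * netif_tx_lock_bh()/netif_tx_unlock_bh() wrappers around xmit_lock; only
 * the SLE_VERSION guard differs.)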
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) #if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) #define setup_xen_features xen_setup_features #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/config.mk000066400000000000000000000023111314037446600214710ustar00rootroot00000000000000######################### TOP config.mk begin ################################# # Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. 
we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H KERNELRELEASE := $(shell echo $(KERNDIR) | grep "2.6.18-194.el5\|2.6.18-8\|2.6.18-SKL\|2.6.18-1.2798") KERNELRELEASE_DISK := $(shell echo $(KERNDIR) | grep 2.6.18-128) AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) ifneq ($(KERNELRELEASE), ) _XEN_CPPFLAGS += -DCUSTOMIZED endif ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif ifneq ($(KERNELRELEASE_DISK), ) _XEN_CPPFLAGS += -DDISK_EX_FEATURE endif _XEN_CPPFLAGS += -include $(AUTOCONF) EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) ######################### TOP config.mk end ################################### UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/000077500000000000000000000000001314037446600213215ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/000077500000000000000000000000001314037446600225705ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/8253pit.h000066400000000000000000000002371314037446600240610ustar00rootroot00000000000000/* * 8253/8254 Programmable Interval Timer */ #ifndef _8253PIT_H #define _8253PIT_H #include #define PIT_TICK_RATE CLOCK_TICK_RATE #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/Kbuild000066400000000000000000000002711314037446600237250ustar00rootroot00000000000000include include/asm-generic/Kbuild.asm header-y += boot.h header-y += debugreg.h header-y += ldt.h header-y += ucontext.h unifdef-y += mtrr.h unifdef-y += setup.h unifdef-y += vm86.h UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/a.out.h000066400000000000000000000013741314037446600237740ustar00rootroot00000000000000#ifndef __I386_A_OUT_H__ #define __I386_A_OUT_H__ struct exec { unsigned long a_info; /* Use macros N_MAGIC, etc for access */ unsigned a_text; /* length of text, in bytes */ unsigned a_data; /* length of data, in bytes */ unsigned a_bss; /* length of uninitialized data area for file, in bytes */ unsigned a_syms; /* length of symbol table data in file, in bytes */ unsigned a_entry; /* start address */ unsigned a_trsize; /* length of relocation info for text, in bytes */ unsigned a_drsize; /* length of relocation info for data, in bytes */ }; #define N_TRSIZE(a) ((a).a_trsize) #define N_DRSIZE(a) ((a).a_drsize) #define N_SYMSIZE(a) ((a).a_syms) #ifdef __KERNEL__ #define STACK_TOP TASK_SIZE #endif #endif /* __A_OUT_GNU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/acpi.h000066400000000000000000000122131314037446600236540ustar00rootroot00000000000000/* * asm-i386/acpi.h * * Copyright (C) 2001 Paul Diefenbaugh * Copyright (C) 2001 Patrick Mochel * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H #ifdef __KERNEL__ #include #include /* defines cmpxchg */ #ifdef CONFIG_XEN #include #endif #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long /* * Calling conventions: * * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) * ACPI_EXTERNAL_XFACE - External ACPI interfaces * ACPI_INTERNAL_XFACE - Internal ACPI interfaces * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces */ #define ACPI_SYSTEM_XFACE #define ACPI_EXTERNAL_XFACE #define ACPI_INTERNAL_XFACE #define ACPI_INTERNAL_VAR_XFACE /* Asm macros */ #define ACPI_ASM_MACROS #define BREAKPOINT3 #define ACPI_DISABLE_IRQS() local_irq_disable() #define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_FLUSH_CPU_CACHE() wbinvd() static inline int __acpi_acquire_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return (new < 3) ? -1 : 0; } static inline int __acpi_release_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = old & ~0x3; val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return old & 0x1; } #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) /* * Math helper asm macros */ #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ asm("divl %2;" \ :"=a"(q32), "=d"(r32) \ :"r"(d32), \ "0"(n_lo), "1"(n_hi)) #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ asm("shrl $1,%2;" \ "rcrl $1,%3;" \ :"=r"(n_hi), "=r"(n_lo) \ :"0"(n_hi), "1"(n_lo)) #ifdef CONFIG_X86_IO_APIC extern void check_acpi_pci(void); #else static inline void check_acpi_pci(void) { } #endif #ifdef CONFIG_ACPI extern int acpi_lapic; extern int acpi_ioapic; extern int acpi_noirq; extern int acpi_strict; extern int acpi_disabled; extern int acpi_ht; extern int acpi_pci_disabled; static inline void disable_acpi(void) { acpi_disabled = 1; acpi_ht = 0; acpi_pci_disabled = 1; acpi_noirq = 1; } /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ #define FIX_ACPI_PAGES 4 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); #ifdef CONFIG_X86_IO_APIC extern int skip_ioapic_setup; extern int acpi_skip_timer_override; static inline void disable_ioapic_setup(void) { skip_ioapic_setup = 1; } static inline int ioapic_setup_disabled(void) { return skip_ioapic_setup; } #else static inline void disable_ioapic_setup(void) { } #endif static inline void acpi_noirq_set(void) { acpi_noirq = 1; } static inline void acpi_disable_pci(void) { acpi_pci_disabled = 1; acpi_noirq_set(); } extern int acpi_irq_balance_set(char *str); #ifdef CONFIG_XEN static inline int acpi_notify_hypervisor_state(u8 sleep_state, u32 pm1a_cnt_val, u32 pm1b_cnt_val) { struct xen_platform_op op = { .cmd = XENPF_enter_acpi_sleep, .interface_version = XENPF_INTERFACE_VERSION, .u = { .enter_acpi_sleep = { .pm1a_cnt_val = pm1a_cnt_val, .pm1b_cnt_val = pm1b_cnt_val, .sleep_state = sleep_state, }, }, }; return HYPERVISOR_platform_op(&op); } #endif /* CONFIG_XEN */ #else 
/* !CONFIG_ACPI */ #define acpi_lapic 0 #define acpi_ioapic 0 static inline void acpi_noirq_set(void) { } static inline void acpi_disable_pci(void) { } #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_SLEEP /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); extern void acpi_restore_state_mem(void); extern unsigned long acpi_wakeup_address; /* early initialization routine */ extern void acpi_reserve_bootmem(void); #endif /*CONFIG_ACPI_SLEEP*/ extern u8 x86_acpiid_to_apicid[]; #ifndef CONFIG_XEN #define ARCH_HAS_POWER_INIT 1 #endif #endif /*__KERNEL__*/ #endif /*_ASM_ACPI_H*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/agp.h000066400000000000000000000024151314037446600235120ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include #include /* * Functions to keep the agpgart mappings coherent with the MMU. * The GART gives the CPU a physical alias of pages in memory. The alias region is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. This avoids * data corruption on some CPUs. */ /* Caller's responsibility to call global_flush_tlb() for * performance reasons */ #define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) #define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() wbinvd() /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) (x) #define gart_to_phys(x) (x) /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) #define free_gatt_pages(table, order) \ free_pages((unsigned long)(table), (order)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/alternative.h000066400000000000000000000101231314037446600252540ustar00rootroot00000000000000#ifndef _I386_ALTERNATIVE_H #define _I386_ALTERNATIVE_H #ifdef __KERNEL__ #include #include struct alt_instr { u8 *instr; /* original instruction */ u8 *replacement; u8 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ u8 replacementlen; /* length of new instruction, <= instrlen */ u8 pad; }; extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); struct module; #ifdef CONFIG_SMP extern void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end); extern void alternatives_smp_module_del(struct module *mod); extern void alternatives_smp_switch(int smp); #else static inline void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) {} static inline void alternatives_smp_module_del(struct module *mod) {} static inline void alternatives_smp_switch(int smp) {} #endif #endif /* * Alternative instructions for different CPU types or capabilities. * * This allows to use optimized instructions even on generic binary * kernels. * * length of oldinstr must be longer or equal the length of newinstr * It can be padded with nops as needed. * * For non barrier like inlines please define new variants * without volatile and memory clobber. 
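 *
 * A typical invocation (illustrative only, not taken from this tree) is a
 * memory barrier that gets patched up to MFENCE on capable CPUs:
 *
 *     alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * where oldinstr is at least as long as newinstr, as required above.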
*/ #define alternative(oldinstr, newinstr, feature) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */\ ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. * * Pecularities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... "r") * If you use variable sized constraints like "m" or "g" in the * replacement maake sure to pad to the worst case length. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */\ ".previous" :: "i" (feature), ##input) /* * Alternative inline assembly for SMP. * * The LOCK_PREFIX macro defined here replaces the LOCK and * LOCK_PREFIX macros used everywhere in the source tree. * * SMP alternatives use the same data structures as the other * alternatives and the X86_FEATURE_UP flag to indicate the case of a * UP system running a SMP kernel. The existing apply_alternatives() * works fine for patching a SMP kernel for UP. * * The SMP alternative tables can be kept after boot and contain both * UP and SMP versions of the instructions to allow switching back to * SMP at runtime, when hotplugging in a new CPU, which is especially * useful in virtualized environments. * * The very common lock prefix is handled as special case in a * separate table which is a pure address list without replacement ptr * and size information. That keeps the table sizes small. */ #ifdef CONFIG_SMP #define LOCK_PREFIX \ ".section .smp_locks,\"a\"\n" \ " .align 4\n" \ " .long 661f\n" /* address */ \ ".previous\n" \ "661:\n\tlock; " #else /* ! CONFIG_SMP */ #define LOCK_PREFIX "" #endif #endif /* _I386_ALTERNATIVE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/apic.h000066400000000000000000000063541314037446600236650ustar00rootroot00000000000000#ifndef __ASM_APIC_H #define __ASM_APIC_H #include #include #include #include #include #define Dprintk(x...) /* * Debugging macros */ #define APIC_QUIET 0 #define APIC_VERBOSE 1 #define APIC_DEBUG 2 extern int enable_local_apic; extern int apic_verbosity; static inline void lapic_disable(void) { enable_local_apic = -1; clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); } static inline void lapic_enable(void) { enable_local_apic = 1; } /* * Define the default level of output to be very little * This can be turned up by using apic=verbose for more * information and apic=debug for _lots_ of information. * apic_verbosity is defined in apic.c */ #define apic_printk(v, s, a...) do { \ if ((v) <= apic_verbosity) \ printk(s, ##a); \ } while (0) #ifdef CONFIG_X86_LOCAL_APIC /* * Basic functions accessing APICs. 
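 *
 * These are plain MMIO accessors for the local APIC page mapped at
 * APIC_BASE. apic_write_atomic() goes through xchg() so every write is
 * paired with a read, which is what the pre-P6 parts handled by the
 * CONFIG_X86_GOOD_APIC logic below require.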
*/ static __inline void apic_write(unsigned long reg, unsigned long v) { *((volatile unsigned long *)(APIC_BASE+reg)) = v; } static __inline void apic_write_atomic(unsigned long reg, unsigned long v) { xchg((volatile unsigned long *)(APIC_BASE+reg), v); } static __inline unsigned long apic_read(unsigned long reg) { return *((volatile unsigned long *)(APIC_BASE+reg)); } static __inline__ void apic_wait_icr_idle(void) { while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) cpu_relax(); } int get_physical_broadcast(void); #ifdef CONFIG_X86_GOOD_APIC # define FORCE_READ_AROUND_WRITE 0 # define apic_read_around(x) # define apic_write_around(x,y) apic_write((x),(y)) #else # define FORCE_READ_AROUND_WRITE 1 # define apic_read_around(x) apic_read(x) # define apic_write_around(x,y) apic_write_atomic((x),(y)) #endif static inline void ack_APIC_irq(void) { /* * ack_APIC_irq() actually gets compiled as a single instruction: * - a single rmw on Pentium/82489DX * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) * ... yummie. */ /* Docs say use 0 for future compatibility */ apic_write_around(APIC_EOI, 0); } extern void (*wait_timer_tick)(void); extern int get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC (void); extern void disconnect_bsp_APIC (int virt_wire_setup); extern void disable_local_APIC (void); extern void lapic_shutdown (void); extern int verify_local_APIC (void); extern void cache_APIC_registers (void); extern void sync_Arb_IDs (void); extern void init_bsp_APIC (void); extern void setup_local_APIC (void); extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void enable_NMI_through_LVT0 (void * dummy); extern int disable_timer_pin_1; #ifndef CONFIG_XEN void smp_send_timer_broadcast_ipi(struct pt_regs *regs); void switch_APIC_timer_to_ipi(void *cpumask); void switch_ipi_to_APIC_timer(void *cpumask); #define ARCH_APICTIMER_STOPS_ON_C3 1 #endif extern int timer_over_8254; #else /* !CONFIG_X86_LOCAL_APIC */ static inline void lapic_shutdown(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/apicdef.h000066400000000000000000000223261314037446600243410ustar00rootroot00000000000000#ifndef __ASM_APICDEF_H #define __ASM_APICDEF_H /* * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) * * Alan Cox , 1995. 
* Ingo Molnar , 1999, 2000 */ #define APIC_DEFAULT_PHYS_BASE 0xfee00000 #define APIC_ID 0x20 #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFF) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) #define APIC_INTEGRATED(x) ((x)&0xF0) #define APIC_XAPIC(x) ((x) >= 0x14) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFF #define APIC_ARBPRI 0x90 #define APIC_ARBPRI_MASK 0xFF #define APIC_PROCPRI 0xA0 #define APIC_EOI 0xB0 #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ #define APIC_RRR 0xC0 #define APIC_LDR 0xD0 #define APIC_LDR_MASK (0xFF<<24) #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) #define APIC_ALL_CPUS 0xFF #define APIC_DFR 0xE0 #define APIC_DFR_CLUSTER 0x0FFFFFFFul #define APIC_DFR_FLAT 0xFFFFFFFFul #define APIC_SPIV 0xF0 #define APIC_SPIV_FOCUS_DISABLED (1<<9) #define APIC_SPIV_APIC_ENABLED (1<<8) #define APIC_ISR 0x100 #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ #define APIC_TMR 0x180 #define APIC_IRR 0x200 #define APIC_ESR 0x280 #define APIC_ESR_SEND_CS 0x00001 #define APIC_ESR_RECV_CS 0x00002 #define APIC_ESR_SEND_ACC 0x00004 #define APIC_ESR_RECV_ACC 0x00008 #define APIC_ESR_SENDILL 0x00020 #define APIC_ESR_RECVILL 0x00040 #define APIC_ESR_ILLREGA 0x00080 #define APIC_ICR 0x300 #define APIC_DEST_SELF 0x40000 #define APIC_DEST_ALLINC 0x80000 #define APIC_DEST_ALLBUT 0xC0000 #define APIC_ICR_RR_MASK 0x30000 #define APIC_ICR_RR_INVALID 0x00000 #define APIC_ICR_RR_INPROG 0x10000 #define APIC_ICR_RR_VALID 0x20000 #define APIC_INT_LEVELTRIG 0x08000 #define APIC_INT_ASSERT 0x04000 #define APIC_ICR_BUSY 0x01000 #define APIC_DEST_LOGICAL 0x00800 #define APIC_DM_FIXED 0x00000 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 #define APIC_DM_NMI 0x00400 #define APIC_DM_INIT 0x00500 #define APIC_DM_STARTUP 0x00600 #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) #define SET_APIC_DEST_FIELD(x) ((x)<<24) #define APIC_LVTT 0x320 #define APIC_LVTTHMR 0x330 #define APIC_LVTPC 0x340 #define APIC_LVT0 0x350 #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) #define SET_APIC_TIMER_BASE(x) (((x)<<18)) #define APIC_TIMER_BASE_CLKIN 0x0 #define APIC_TIMER_BASE_TMBASE 0x1 #define APIC_TIMER_BASE_DIV 0x2 #define APIC_LVT_TIMER_PERIODIC (1<<17) #define APIC_LVT_MASKED (1<<16) #define APIC_LVT_LEVEL_TRIGGER (1<<15) #define APIC_LVT_REMOTE_IRR (1<<14) #define APIC_INPUT_POLARITY (1<<13) #define APIC_SEND_PENDING (1<<12) #define APIC_MODE_MASK 0x700 #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 #define APIC_MODE_EXTINT 0x7 #define APIC_LVT1 0x360 #define APIC_LVTERR 0x370 #define APIC_TMICT 0x380 #define APIC_TMCCT 0x390 #define APIC_TDCR 0x3E0 #define APIC_TDR_DIV_TMBASE (1<<2) #define APIC_TDR_DIV_1 0xB #define APIC_TDR_DIV_2 0x0 #define APIC_TDR_DIV_4 0x1 #define APIC_TDR_DIV_8 0x2 #define APIC_TDR_DIV_16 0x3 #define APIC_TDR_DIV_32 0x8 #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #define MAX_IO_APICS 64 /* * the local APIC register structure, memory mapped. 
Not terribly well * tested, but we might eventually use this one in the future - the * problem why we cannot use it right now is the P5 APIC, it has an * errata which cannot take 8-bit reads and writes, only 32-bit ones ... */ #define u32 unsigned int struct local_apic { /*000*/ struct { u32 __reserved[4]; } __reserved_01; /*010*/ struct { u32 __reserved[4]; } __reserved_02; /*020*/ struct { /* APIC ID Register */ u32 __reserved_1 : 24, phys_apic_id : 4, __reserved_2 : 4; u32 __reserved[3]; } id; /*030*/ const struct { /* APIC Version Register */ u32 version : 8, __reserved_1 : 8, max_lvt : 8, __reserved_2 : 8; u32 __reserved[3]; } version; /*040*/ struct { u32 __reserved[4]; } __reserved_03; /*050*/ struct { u32 __reserved[4]; } __reserved_04; /*060*/ struct { u32 __reserved[4]; } __reserved_05; /*070*/ struct { u32 __reserved[4]; } __reserved_06; /*080*/ struct { /* Task Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } tpr; /*090*/ const struct { /* Arbitration Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } apr; /*0A0*/ const struct { /* Processor Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } ppr; /*0B0*/ struct { /* End Of Interrupt Register */ u32 eoi; u32 __reserved[3]; } eoi; /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; /*0D0*/ struct { /* Logical Destination Register */ u32 __reserved_1 : 24, logical_dest : 8; u32 __reserved_2[3]; } ldr; /*0E0*/ struct { /* Destination Format Register */ u32 __reserved_1 : 28, model : 4; u32 __reserved_2[3]; } dfr; /*0F0*/ struct { /* Spurious Interrupt Vector Register */ u32 spurious_vector : 8, apic_enabled : 1, focus_cpu : 1, __reserved_2 : 22; u32 __reserved_3[3]; } svr; /*100*/ struct { /* In Service Register */ /*170*/ u32 bitfield; u32 __reserved[3]; } isr [8]; /*180*/ struct { /* Trigger Mode Register */ /*1F0*/ u32 bitfield; u32 __reserved[3]; } tmr [8]; /*200*/ struct { /* Interrupt Request Register */ /*270*/ u32 bitfield; u32 __reserved[3]; } irr [8]; /*280*/ union { /* Error Status Register */ struct { u32 send_cs_error : 1, receive_cs_error : 1, send_accept_error : 1, receive_accept_error : 1, __reserved_1 : 1, send_illegal_vector : 1, receive_illegal_vector : 1, illegal_register_address : 1, __reserved_2 : 24; u32 __reserved_3[3]; } error_bits; struct { u32 errors; u32 __reserved_3[3]; } all_errors; } esr; /*290*/ struct { u32 __reserved[4]; } __reserved_08; /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; /*300*/ struct { /* Interrupt Command Register 1 */ u32 vector : 8, delivery_mode : 3, destination_mode : 1, delivery_status : 1, __reserved_1 : 1, level : 1, trigger : 1, __reserved_2 : 2, shorthand : 2, __reserved_3 : 12; u32 __reserved_4[3]; } icr1; /*310*/ struct { /* Interrupt Command Register 2 */ union { u32 __reserved_1 : 24, phys_dest : 4, __reserved_2 : 4; u32 __reserved_3 : 24, logical_dest : 8; } dest; u32 __reserved_4[3]; } icr2; /*320*/ struct { /* LVT - Timer */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, timer_mode : 1, __reserved_3 : 14; u32 __reserved_4[3]; } lvt_timer; /*330*/ struct { /* LVT - Thermal Sensor */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, 
mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_thermal; /*340*/ struct { /* LVT - Performance Counter */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_pc; /*350*/ struct { /* LVT - LINT0 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint0; /*360*/ struct { /* LVT - LINT1 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint1; /*370*/ struct { /* LVT - Error */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_error; /*380*/ struct { /* Timer Initial Count Register */ u32 initial_count; u32 __reserved_2[3]; } timer_icr; /*390*/ const struct { /* Timer Current Count Register */ u32 curr_count; u32 __reserved_2[3]; } timer_ccr; /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; /*3E0*/ struct { /* Timer Divide Configuration Register */ u32 divisor : 4, __reserved_1 : 28; u32 __reserved_2[3]; } timer_dcr; /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; } __attribute__ ((packed)); #undef u32 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/arch_hooks.h000066400000000000000000000014641314037446600250660ustar00rootroot00000000000000#ifndef _ASM_ARCH_HOOKS_H #define _ASM_ARCH_HOOKS_H #include /* * linux/include/asm/arch_hooks.h * * define the architecture specific hooks */ /* these aren't arch hooks, they are generic routines * that can be used by the hooks */ extern void init_ISA_irqs(void); extern void apic_intr_init(void); extern void smp_intr_init(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); /* these are the defined hooks */ extern void intr_init_hook(void); extern void pre_intr_init_hook(void); extern void pre_setup_arch_hook(void); extern void trap_init_hook(void); extern void time_init_hook(void); extern void mca_nmi_hook(void); extern int setup_early_printk(char *); extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/atomic.h000066400000000000000000000134121314037446600242160ustar00rootroot00000000000000#ifndef __ARCH_I386_ATOMIC__ #define __ARCH_I386_ATOMIC__ #include #include /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ /* * Make sure gcc doesn't try to be clever and move things around * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value * * Atomically sets the value of @v to @i. */ #define atomic_set(v,i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v. 
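 *
 * Minimal illustrative use:
 *
 *     atomic_t refs = ATOMIC_INIT(1);
 *     atomic_add(2, &refs);   (refs is now 3)
 *     atomic_sub(1, &refs);   (refs is now 2)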
*/ static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( LOCK_PREFIX "addl %1,%0" :"+m" (v->counter) :"ir" (i)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v. */ static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( LOCK_PREFIX "subl %1,%0" :"+m" (v->counter) :"ir" (i)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK_PREFIX "subl %2,%0; sete %1" :"+m" (v->counter), "=qm" (c) :"ir" (i) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t * * Atomically increments @v by 1. */ static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( LOCK_PREFIX "incl %0" :"+m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t * * Atomically decrements @v by 1. */ static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( LOCK_PREFIX "decl %0" :"+m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ static __inline__ int atomic_dec_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK_PREFIX "decl %0; sete %1" :"+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; } /** * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_inc_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK_PREFIX "incl %0; sete %1" :"+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; } /** * atomic_add_negative - add and test if negative * @v: pointer of type atomic_t * @i: integer value to add * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. */ static __inline__ int atomic_add_negative(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK_PREFIX "addl %2,%0; sets %1" :"+m" (v->counter), "=qm" (c) :"ir" (i) : "memory"); return c; } /** * atomic_add_return - add and return * @v: pointer of type atomic_t * @i: integer value to add * * Atomically adds @i to @v and returns @i + @v */ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i; #ifdef CONFIG_M386 unsigned long flags; if(unlikely(boot_cpu_data.x86==3)) goto no_xadd; #endif /* Modern 486+ processor */ __i = i; __asm__ __volatile__( LOCK_PREFIX "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; #ifdef CONFIG_M386 no_xadd: /* Legacy 386 processor */ local_irq_save(flags); __i = atomic_read(v); atomic_set(v, i + __i); local_irq_restore(flags); return i + __i; #endif } static __inline__ int atomic_sub_return(int i, atomic_t *v) { return atomic_add_return(-i,v); } #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** * atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... 
* @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ #define atomic_add_unless(v, a, u) \ ({ \ int c, old; \ c = atomic_read(v); \ for (;;) { \ if (unlikely(c == (u))) \ break; \ old = atomic_cmpxchg((v), c, c + (a)); \ if (likely(old == c)) \ break; \ c = old; \ } \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") #define atomic_set_mask(mask, addr) \ __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ : : "r" (mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/auxvec.h000066400000000000000000000003551314037446600242370ustar00rootroot00000000000000#ifndef __ASMi386_AUXVEC_H #define __ASMi386_AUXVEC_H /* * Architecture-neutral AT_ values in 0-17, leave some room * for more of them, start the x86-specific ones at 32. */ #define AT_SYSINFO 32 #define AT_SYSINFO_EHDR 33 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/bitops.h000066400000000000000000000242261314037446600242470ustar00rootroot00000000000000#ifndef _I386_BITOPS_H #define _I386_BITOPS_H /* * Copyright 1992, Linus Torvalds. */ #include #include /* * These have to be done with inline assembly: that way the bit-setting * is guaranteed to be atomic. All bit operations return 0 if the bit * was cleared before the operation and != 0 if it was not. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ #define ADDR (*(volatile long *) addr) /** * set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * This function is atomic and may not be reordered. See __set_bit() * if you do not require the atomic guarantees. * * Note: there are no guarantees that this function will not be reordered * on non x86 architectures, so if you are writting portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void set_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK_PREFIX "btsl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Unlike set_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static inline void __set_bit(int nr, volatile unsigned long * addr) { __asm__( "btsl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. 
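 *
 * A common release-style pattern (illustrative; BUSY_BIT and word are
 * placeholders, not names from this driver):
 *
 *     smp_mb__before_clear_bit();
 *     clear_bit(BUSY_BIT, &word);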
*/ static inline void clear_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK_PREFIX "btrl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } static inline void __clear_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( "btrl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() /** * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * * Unlike change_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static inline void __change_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( "btcl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * * change_bit() is atomic and may not be reordered. It may be * reordered on other architectures than x86. * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void change_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK_PREFIX "btcl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It may be reordered on other architectures than x86. * It also implies a memory barrier. */ static inline int test_and_set_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__( "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr)); return oldbit; } /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It can be reorderdered on other architectures other than x86. * It also implies a memory barrier. */ static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { int oldbit; __asm__( "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr)); return oldbit; } /* WARNING: non atomic and it can be reordered! 
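 * Callers must provide their own serialisation against concurrent updates
 * of the same word; use test_and_change_bit() below when the atomic,
 * locked form is needed.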
*/ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { int oldbit; __asm__ __volatile__( "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ static inline int test_and_change_bit(int nr, volatile unsigned long* addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } #if 0 /* Fool kernel-doc since it doesn't do macros yet */ /** * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ static int test_bit(int nr, const volatile void * addr); #endif static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) { return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; } static inline int variable_test_bit(int nr, const volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( "btl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit) :"m" (ADDR),"Ir" (nr)); return oldbit; } #define test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ constant_test_bit((nr),(addr)) : \ variable_test_bit((nr),(addr))) #undef ADDR /** * find_first_zero_bit - find the first zero bit in a memory region * @addr: The address to start the search at * @size: The maximum size to search * * Returns the bit-number of the first zero bit, not the number of the byte * containing a bit. */ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) { int d0, d1, d2; int res; if (!size) return 0; /* This looks at memory. Mark it volatile to tell gcc not to move it around */ __asm__ __volatile__( "movl $-1,%%eax\n\t" "xorl %%edx,%%edx\n\t" "repe; scasl\n\t" "je 1f\n\t" "xorl -4(%%edi),%%eax\n\t" "subl $4,%%edi\n\t" "bsfl %%eax,%%edx\n" "1:\tsubl %%ebx,%%edi\n\t" "shll $3,%%edi\n\t" "addl %%edi,%%edx" :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); return res; } /** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The maximum size to search */ int find_next_zero_bit(const unsigned long *addr, int size, int offset); /** * __ffs - find first bit in word. * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ static inline unsigned long __ffs(unsigned long word) { __asm__("bsfl %1,%0" :"=r" (word) :"rm" (word)); return word; } /** * find_first_bit - find the first set bit in a memory region * @addr: The address to start the search at * @size: The maximum size to search * * Returns the bit-number of the first set bit, not the number of the byte * containing a bit. */ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) { unsigned x = 0; while (x < size) { unsigned long val = *addr++; if (val) return __ffs(val) + x; x += (sizeof(*addr)<<3); } return x; } /** * find_next_bit - find the first set bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The maximum size to search */ int find_next_bit(const unsigned long *addr, int size, int offset); /** * ffz - find first zero in word. 
* @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. */ static inline unsigned long ffz(unsigned long word) { __asm__("bsfl %1,%0" :"=r" (word) :"r" (~word)); return word; } #ifdef __KERNEL__ #include /** * ffs - find first bit set * @x: the word to search * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ static inline int ffs(int x) { int r; __asm__("bsfl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); return r+1; } /** * fls - find last bit set * @x: the word to search * * This is defined the same way as ffs. */ static inline int fls(int x) { int r; __asm__("bsrl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); return r+1; } #include #endif /* __KERNEL__ */ #include #ifdef __KERNEL__ #include #define ext2_set_bit_atomic(lock,nr,addr) \ test_and_set_bit((nr),(unsigned long*)addr) #define ext2_clear_bit_atomic(lock,nr, addr) \ test_and_clear_bit((nr),(unsigned long*)addr) #include #endif /* __KERNEL__ */ #endif /* _I386_BITOPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/boot.h000066400000000000000000000006311314037446600237040ustar00rootroot00000000000000#ifndef _LINUX_BOOT_H #define _LINUX_BOOT_H /* Don't touch these, unless you really know what you're doing. */ #define DEF_INITSEG 0x9000 #define DEF_SYSSEG 0x1000 #define DEF_SETUPSEG 0x9020 #define DEF_SYSSIZE 0x7F00 /* Internal svga startup constants */ #define NORMAL_VGA 0xffff /* 80x25 mode */ #define EXTENDED_VGA 0xfffe /* 80x50 mode */ #define ASK_VGA 0xfffd /* ask for it at bootup */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/bug.h000066400000000000000000000010041314037446600235110ustar00rootroot00000000000000#ifndef _I386_BUG_H #define _I386_BUG_H /* * Tell the user there is some problem. * The offending file and line are encoded after the "officially * undefined" opcode for parsing in the trap handler. */ #ifdef CONFIG_BUG #define HAVE_ARCH_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE #define BUG() \ __asm__ __volatile__( "ud2\n" \ "\t.word %c0\n" \ "\t.long %c1\n" \ : : "i" (__LINE__), "i" (__FILE__)) #else #define BUG() __asm__ __volatile__("ud2\n") #endif #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/bugs.h000066400000000000000000000115301314037446600237010ustar00rootroot00000000000000/* * include/asm-i386/bugs.h * * Copyright (C) 1994 Linus Torvalds * * Cyrix stuff, June 1998 by: * - Rafael R. Reilova (moved everything from head.S), * * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). */ /* * This is included by init/main.c to check for architecture-dependent bugs. * * Needs: * void check_bugs(void); */ #include #include #include #include static int __init no_halt(char *s) { boot_cpu_data.hlt_works_ok = 0; return 1; } __setup("no-hlt", no_halt); static int __init mca_pentium(char *s) { mca_pentium_flag = 1; return 1; } __setup("mca-pentium", mca_pentium); static int __init no_387(char *s) { boot_cpu_data.hard_math = 0; write_cr0(0xE | read_cr0()); return 1; } __setup("no387", no_387); static double __initdata x = 4195835.0; static double __initdata y = 3145727.0; /* * This used to check for exceptions.. * However, it turns out that to support that, * the XMM trap handlers basically had to * be buggy. So let's have a correct XMM trap * handler, and forget about printing out * some status at boot. * * We should really only care about bugs here * anyway. 
Not features. */ static void __init check_fpu(void) { if (!boot_cpu_data.hard_math) { #ifndef CONFIG_MATH_EMULATION printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); printk(KERN_EMERG "Giving up.\n"); for (;;) ; #endif return; } /* trap_init() enabled FXSR and company _before_ testing for FP problems here. */ /* Test for the divl bug.. */ __asm__("fninit\n\t" "fldl %1\n\t" "fdivl %2\n\t" "fmull %2\n\t" "fldl %1\n\t" "fsubp %%st,%%st(1)\n\t" "fistpl %0\n\t" "fwait\n\t" "fninit" : "=m" (*&boot_cpu_data.fdiv_bug) : "m" (*&x), "m" (*&y)); if (boot_cpu_data.fdiv_bug) printk("Hmm, FPU with FDIV bug.\n"); } static void __init check_hlt(void) { printk(KERN_INFO "Checking 'hlt' instruction... "); if (!boot_cpu_data.hlt_works_ok) { printk("disabled\n"); return; } halt(); halt(); halt(); halt(); printk("OK.\n"); } /* * Most 386 processors have a bug where a POPAD can lock the * machine even from user space. */ static void __init check_popad(void) { #ifndef CONFIG_X86_POPAD_OK int res, inp = (int) &res; printk(KERN_INFO "Checking for popad bug... "); __asm__ __volatile__( "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx " : "=&a" (res) : "d" (inp) : "ecx", "edi" ); /* If this fails, it means that any user program may lock the CPU hard. Too bad. */ if (res != 12345678) printk( "Buggy.\n" ); else printk( "OK.\n" ); #endif } /* * Check whether we are able to run this kernel safely on SMP. * * - In order to run on a i386, we need to be compiled for i386 * (for due to lack of "invlpg" and working WP on a i386) * - In order to run on anything without a TSC, we need to be * compiled for a i486. * - In order to support the local APIC on a buggy Pentium machine, * we need to be compiled with CONFIG_X86_GOOD_APIC disabled, * which happens implicitly if compiled for a Pentium or lower * (unless an advanced selection of CPU features is used) as an * otherwise config implies a properly working local APIC without * the need to do extra reads from the APIC. */ static void __init check_config(void) { /* * We'd better not be a i386 if we're configured to use some * i486+ only features! (WP works in supervisor mode and the * new "invlpg" and "bswap" instructions) */ #if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP) if (boot_cpu_data.x86 == 3) panic("Kernel requires i486+ for 'invlpg' and other features"); #endif /* * If we configured ourselves for a TSC, we'd better have one! */ #ifdef CONFIG_X86_TSC if (!cpu_has_tsc) panic("Kernel compiled for Pentium+, requires TSC feature!"); #endif /* * If we were told we had a good local APIC, check for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). */ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && cpu_has_apic && boot_cpu_data.x86 == 5 && boot_cpu_data.x86_model == 2 && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!"); #endif } extern void alternative_instructions(void); static void __init check_bugs(void) { identify_cpu(&boot_cpu_data); #ifndef CONFIG_SMP printk("CPU: "); print_cpu_info(&boot_cpu_data); #endif check_config(); check_fpu(); check_hlt(); check_popad(); system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 
6 : boot_cpu_data.x86); alternative_instructions(); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/byteorder.h000066400000000000000000000024251314037446600247430ustar00rootroot00000000000000#ifndef _I386_BYTEORDER_H #define _I386_BYTEORDER_H #include #include #ifdef __GNUC__ /* For avoiding bswap on i386 */ #ifdef __KERNEL__ #endif static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) { #ifdef CONFIG_X86_BSWAP __asm__("bswap %0" : "=r" (x) : "0" (x)); #else __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ "rorl $16,%0\n\t" /* swap words */ "xchgb %b0,%h0" /* swap higher bytes */ :"=q" (x) : "0" (x)); #endif return x; } static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) { union { struct { __u32 a,b; } s; __u64 u; } v; v.u = val; #ifdef CONFIG_X86_BSWAP asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #else v.s.a = ___arch__swab32(v.s.a); v.s.b = ___arch__swab32(v.s.b); asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #endif return v.u; } /* Do not define swab16. Gcc is smart enough to recognize "C" version and convert it into rotation or exhange. */ #define __arch__swab64(x) ___arch__swab64(x) #define __arch__swab32(x) ___arch__swab32(x) #define __BYTEORDER_HAS_U64__ #endif /* __GNUC__ */ #include #endif /* _I386_BYTEORDER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/cache.h000066400000000000000000000004471314037446600240110ustar00rootroot00000000000000/* * include/asm-i386/cache.h */ #ifndef __ARCH_I386_CACHE_H #define __ARCH_I386_CACHE_H /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data.read_mostly"))) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/cacheflush.h000066400000000000000000000024741314037446600250550ustar00rootroot00000000000000#ifndef _I386_CACHEFLUSH_H #define _I386_CACHEFLUSH_H /* Keep includes the same across arches. */ #include /* Caches aren't brain-dead on the intel. 
*/ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_range(start, end) do { } while (0) #define flush_icache_page(vma,pg) do { } while (0) #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) void global_flush_tlb(void); int change_page_attr(struct page *page, int numpages, pgprot_t prot); #ifdef CONFIG_DEBUG_PAGEALLOC /* internal debugging function */ void kernel_map_pages(struct page *page, int numpages, int enable); #endif #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif #endif /* _I386_CACHEFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/checksum.h000066400000000000000000000115341314037446600245470ustar00rootroot00000000000000#ifndef _I386_CHECKSUM_H #define _I386_CHECKSUM_H #include #include /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); /* * the same as csum_partial, but copies from src while it * checksums, and handles user-space pointer exceptions correctly, when needed. * * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. * * If you use these functions directly please don't forget the * access_ok(). */ static __inline__ unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len, int sum) { return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); } static __inline__ unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len, int sum, int *err_ptr) { might_sleep(); return csum_partial_copy_generic((__force unsigned char *)src, dst, len, sum, err_ptr, NULL); } /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. * * By Jorge Cwik , adapted for linux by * Arnt Gulbrandsen. 
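 *
 * Illustrative use (not part of the original header) -- the typical caller
 * pattern when (re)writing an IPv4 header checksum is:
 *
 *	iph->check = 0;
 *	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 *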
*/ static inline unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) { unsigned int sum; __asm__ __volatile__( "movl (%1), %0 ;\n" "subl $4, %2 ;\n" "jbe 2f ;\n" "addl 4(%1), %0 ;\n" "adcl 8(%1), %0 ;\n" "adcl 12(%1), %0 ;\n" "1: adcl 16(%1), %0 ;\n" "lea 4(%1), %1 ;\n" "decl %2 ;\n" "jne 1b ;\n" "adcl $0, %0 ;\n" "movl %0, %2 ;\n" "shrl $16, %0 ;\n" "addw %w2, %w0 ;\n" "adcl $0, %0 ;\n" "notl %0 ;\n" "2: ;\n" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ : "=r" (sum), "=r" (iph), "=r" (ihl) : "1" (iph), "2" (ihl) : "memory"); return(sum); } /* * Fold a partial checksum */ static inline unsigned int csum_fold(unsigned int sum) { __asm__( "addl %1, %0 ;\n" "adcl $0xffff, %0 ;\n" : "=r" (sum) : "r" (sum << 16), "0" (sum & 0xffff0000) ); return (~sum) >> 16; } static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) { __asm__( "addl %1, %0 ;\n" "adcl %2, %0 ;\n" "adcl %3, %0 ;\n" "adcl $0, %0 ;\n" : "=r" (sum) : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum)); return sum; } /* * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ static inline unsigned short ip_compute_csum(unsigned char * buff, int len) { return csum_fold (csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, unsigned short proto, unsigned int sum) { __asm__( "addl 0(%1), %0 ;\n" "adcl 4(%1), %0 ;\n" "adcl 8(%1), %0 ;\n" "adcl 12(%1), %0 ;\n" "adcl 0(%2), %0 ;\n" "adcl 4(%2), %0 ;\n" "adcl 8(%2), %0 ;\n" "adcl 12(%2), %0 ;\n" "adcl %3, %0 ;\n" "adcl %4, %0 ;\n" "adcl $0, %0 ;\n" : "=&r" (sum) : "r" (saddr), "r" (daddr), "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); return csum_fold(sum); } /* * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len, int sum, int *err_ptr) { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) return csum_partial_copy_generic(src, (__force unsigned char *)dst, len, sum, NULL, err_ptr); if (len) *err_ptr = -EFAULT; return -1; /* invalid checksum */ } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/cpu.h000066400000000000000000000006131314037446600235300ustar00rootroot00000000000000#ifndef _ASM_I386_CPU_H_ #define _ASM_I386_CPU_H_ #include #include #include #include #include struct i386_cpu { struct cpu cpu; }; extern int arch_register_cpu(int num); #ifdef CONFIG_HOTPLUG_CPU extern void arch_unregister_cpu(int); #endif DECLARE_PER_CPU(int, cpu_state); #endif /* _ASM_I386_CPU_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/cpufeature.h000066400000000000000000000163571314037446600251200ustar00rootroot00000000000000/* * cpufeature.h * * Defines x86 CPU feature bits */ #ifndef __ASM_I386_CPUFEATURE_H #define __ASM_I386_CPUFEATURE_H #include #define NCAPINTS 7 /* N 32-bit words worth of info */ /* Intel-defined CPU features, CPUID 
level 0x00000001 (edx), word 0 */ #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ /* of FPU context), and CR4.OSFXSR available */ #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
*/ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ /* cpu types for specific tunings: */ #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_CID (4*32+10) /* Context ID */ #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) #define cpu_has_xmm3 
boot_cpu_has(X86_FEATURE_XMM3) #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) #define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) #define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) #define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) #endif /* __ASM_I386_CPUFEATURE_H */ /* * Local Variables: * mode:c * comment-column:42 * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/cputime.h000066400000000000000000000001631314037446600244070ustar00rootroot00000000000000#ifndef __I386_CPUTIME_H #define __I386_CPUTIME_H #include #endif /* __I386_CPUTIME_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/current.h000066400000000000000000000004161314037446600244240ustar00rootroot00000000000000#ifndef _I386_CURRENT_H #define _I386_CURRENT_H #include struct task_struct; static __always_inline struct task_struct * get_current(void) { return current_thread_info()->task; } #define current get_current() #endif /* !(_I386_CURRENT_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/debugreg.h000066400000000000000000000053011314037446600245240ustar00rootroot00000000000000#ifndef _I386_DEBUGREG_H #define _I386_DEBUGREG_H /* Indicate the register numbers for a number of the specific debug registers. Registers 0-3 contain the addresses we wish to trap on */ #define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */ #define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */ #define DR_STATUS 6 /* u_debugreg[DR_STATUS] */ #define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */ /* Define a few things for the status register. We can use this to determine which debugging register was responsible for the trap. The other bits are either reserved or not of interest to us. */ #define DR_TRAP0 (0x1) /* db0 */ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ #define DR_TRAP3 (0x8) /* db3 */ #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ /* Now define a bunch of things for manipulating the control register. The top two bytes of the control register consist of 4 fields of 4 bits - each field corresponds to one of the four debug registers, and indicates what types of access we trap on, and how large the data field is that we are looking at */ #define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */ #define DR_CONTROL_SIZE 4 /* 4 control bits per register */ #define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */ #define DR_RW_WRITE (0x1) #define DR_RW_READ (0x3) #define DR_LEN_1 (0x0) /* Settings for data length to trap on */ #define DR_LEN_2 (0x4) #define DR_LEN_4 (0xC) /* The low byte to the control register determine which registers are enabled. There are 4 fields of two bits. 
One bit is "local", meaning that the processor will reset the bit after a task switch and the other is global meaning that we have to explicitly reset the bit. With linux, you can use either one, since we explicitly zero the register when we enter kernel mode. */ #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ #define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */ /* The second byte to the control register has a few special things. We can slow the instruction pipeline for instructions coming via the gdt or the ldt if we want to. I am not sure why this is an advantage */ #define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */ #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/delay.h000066400000000000000000000013171314037446600240410ustar00rootroot00000000000000#ifndef _I386_DELAY_H #define _I386_DELAY_H /* * Copyright (C) 1993 Linus Torvalds * * Delay routines calling functions in arch/i386/lib/delay.c */ extern void __bad_udelay(void); extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); extern void __ndelay(unsigned long nsecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ __udelay(n)) #define ndelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) void use_tsc_delay(void); #endif /* defined(_I386_DELAY_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/desc.h000066400000000000000000000103011314037446600236520ustar00rootroot00000000000000#ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #define CPU_16BIT_STACK_SIZE 1024 #ifndef __ASSEMBLY__ #include #include #include #include extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); struct Xgt_desc_struct { unsigned short size; unsigned long address __attribute__((packed)); unsigned short pad; } __attribute__ ((packed)); extern struct Xgt_desc_struct idt_descr; DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr)) #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt)) #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr)) #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr)) #define store_tr(tr) __asm__ ("str %0":"=mr" (tr)) #define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt)) /* * This is the ldt that every process will get unless we need * something other than this. 
*/ extern struct desc_struct default_ldt[]; extern void set_intr_gate(unsigned int irq, void * addr); #define _set_tssldt_desc(n,addr,limit,type) \ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ "movw %w1,2(%2)\n\t" \ "rorl $16,%1\n\t" \ "movb %b1,4(%2)\n\t" \ "movb %4,5(%2)\n\t" \ "movb $0,6(%2)\n\t" \ "movb %h1,7(%2)\n\t" \ "rorl $16,%1" \ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type)) static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr, offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); } #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); } #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 ) static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b) { __u32 *lp = (__u32 *)((char *)ldt + entry*8); *lp = entry_a; *(lp+1) = entry_b; } #if TLS_SIZE != 24 # error update this code. #endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { #define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] C(0); C(1); C(2); #undef C } static inline void clear_LDT(void) { int cpu = get_cpu(); set_ldt_desc(cpu, &default_ldt[0], 5); load_LDT_desc(); put_cpu(); } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock(mm_context_t *pc, int cpu) { void *segments = pc->ldt; int count = pc->size; if (likely(!count)) { segments = &default_ldt[0]; count = 5; } set_ldt_desc(cpu, segments, count); load_LDT_desc(); } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } static inline unsigned long get_desc_base(unsigned long *desc) { unsigned long base; base = ((desc[0] >> 16) & 0x0000ffff) | ((desc[1] << 16) & 0x00ff0000) | (desc[1] & 0xff000000); return base; } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/div64.h000066400000000000000000000022331314037446600236750ustar00rootroot00000000000000#ifndef __I386_DIV64 #define __I386_DIV64 /* * do_div() is NOT a C function. It wants to return * two values (the quotient and the remainder), but * since that doesn't work very well in C, what it * does is: * * - modifies the 64-bit dividend _in_place_ * - returns the 32-bit remainder * * This ends up being the most efficient "calling * convention" on x86. 
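 *
 * Illustrative use (not part of the original header), showing the in-place
 * semantics described above:
 *
 *	u64 ns = 1000000123ULL;
 *	u32 rem = do_div(ns, 1000000);	-- afterwards ns == 1000, rem == 123
 *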
*/ #define do_div(n,base) ({ \ unsigned long __upper, __low, __high, __mod, __base; \ __base = (base); \ asm("":"=a" (__low), "=d" (__high):"A" (n)); \ __upper = __high; \ if (__high) { \ __upper = __high % (__base); \ __high = __high / (__base); \ } \ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ asm("":"=A" (n):"a" (__low),"d" (__high)); \ __mod; \ }) /* * (long)X = ((long long)divs) / (long)div * (long)rem = ((long long)divs) % (long)div * * Warning, this will do an exception if X overflows. */ #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) static inline long div_ll_X_l_rem(long long divs, long div, long *rem) { long dum2; __asm__("divl %2":"=a"(dum2), "=d"(*rem) : "rm"(div), "A"(divs)); return dum2; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/dma-mapping.h000066400000000000000000000101251314037446600251320ustar00rootroot00000000000000#ifndef _ASM_I386_DMA_MAPPING_H #define _ASM_I386_DMA_MAPPING_H #include #include #include #include #include #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); static inline dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) BUG(); WARN_ON(size == 0); flush_write_buffers(); return virt_to_phys(ptr); } static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) BUG(); } static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { int i; if (direction == DMA_NONE) BUG(); WARN_ON(nents == 0 || sg[0].length == 0); for (i = 0; i < nents; i++ ) { BUG_ON(!sg[i].page); sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; } flush_write_buffers(); return nents; } static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); return page_to_phys(page) + offset; } static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); } static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum 
dma_data_direction direction) { flush_write_buffers(); } static inline int dma_mapping_error(dma_addr_t dma_addr) { return 0; } static inline int dma_supported(struct device *dev, u64 mask) { /* * we fall back to GFP_DMA when the mask isn't all 1s, * so we can't guarantee allocations that must be * within a tighter range than GFP_DMA.. */ if(mask < 0x00ffffff) return 0; return 1; } static inline int dma_set_mask(struct device *dev, u64 mask) { if(!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } static inline int dma_get_cache_alignment(void) { /* no easy way to get cache size on all x86, so return the * maximum possible, to be safe */ return (1 << INTERNODE_CACHE_SHIFT); } #define dma_is_consistent(d) (1) static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags); extern void dma_release_declared_memory(struct device *dev); extern void * dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/dma.h000066400000000000000000000231231314037446600235030ustar00rootroot00000000000000/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen * and John Boyd, Nov. 1992. */ #ifndef _ASM_DMA_H #define _ASM_DMA_H #include /* And spinlocks */ #include /* need byte IO */ #include #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER #define dma_outb outb_p #else #define dma_outb outb #endif #define dma_inb inb /* * NOTES about DMA transfers: * * controller 1: channels 0-3, byte operations, ports 00-1F * controller 2: channels 4-7, word operations, ports C0-DF * * - ALL registers are 8 bits only, regardless of transfer size * - channel 4 is not used - cascades 1 into 2. * - channels 0-3 are byte - addresses/counts are for physical bytes * - channels 5-7 are word - addresses/counts are for physical words * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries * - transfer count loaded to registers is 1 less than actual count * - controller 2 offsets are all even (2x offsets for controller 1) * - page registers for 5-7 don't use data bit 0, represent 128K pages * - page registers for 0-3 use bit 0, represent 64K pages * * DMA transfers are limited to the lower 16MB of _physical_ memory. * Note that addresses loaded into registers must be _physical_ addresses, * not logical addresses (which may differ if paging is active). * * Address mapping for channels 0-3: * * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) * | ... | | ... | | ... | * | ... | | ... | | ... | * | ... | | ... | | ... | * P7 ... P0 A7 ... A0 A7 ... A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Address mapping for channels 5-7: * * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) * | ... | \ \ ... \ \ \ ... \ \ * | ... | \ \ ... \ \ \ ... \ (not used) * | ... | \ \ ... \ \ \ ... \ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... 
A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at * the hardware level, so odd-byte transfers aren't possible). * * Transfer count (_not # bytes_) is limited to 64K, represented as actual * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, * and up to 128K bytes may be transferred on channels 5-7 in one operation. * */ #define MAX_DMA_CHANNELS 8 /* The maximum address that we can perform a DMA transfer to on this platform */ #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) /* 8237 DMA controllers */ #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ /* DMA controller registers */ #define DMA1_CMD_REG 0x08 /* command register (w) */ #define DMA1_STAT_REG 0x08 /* status register (r) */ #define DMA1_REQ_REG 0x09 /* request register (w) */ #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ #define DMA1_MODE_REG 0x0B /* mode register (w) */ #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ #define DMA2_CMD_REG 0xD0 /* command register (w) */ #define DMA2_STAT_REG 0xD0 /* status register (r) */ #define DMA2_REQ_REG 0xD2 /* request register (w) */ #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ #define DMA2_MODE_REG 0xD6 /* mode register (w) */ #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ #define DMA_ADDR_0 0x00 /* DMA address registers */ #define DMA_ADDR_1 0x02 #define DMA_ADDR_2 0x04 #define DMA_ADDR_3 0x06 #define DMA_ADDR_4 0xC0 #define DMA_ADDR_5 0xC4 #define DMA_ADDR_6 0xC8 #define DMA_ADDR_7 0xCC #define DMA_CNT_0 0x01 /* DMA count registers */ #define DMA_CNT_1 0x03 #define DMA_CNT_2 0x05 #define DMA_CNT_3 0x07 #define DMA_CNT_4 0xC2 #define DMA_CNT_5 0xC6 #define DMA_CNT_6 0xCA #define DMA_CNT_7 0xCE #define DMA_PAGE_0 0x87 /* DMA page registers */ #define DMA_PAGE_1 0x83 #define DMA_PAGE_2 0x81 #define DMA_PAGE_3 0x82 #define DMA_PAGE_5 0x8B #define DMA_PAGE_6 0x89 #define DMA_PAGE_7 0x8A #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ #define DMA_AUTOINIT 0x10 extern spinlock_t dma_spin_lock; static __inline__ unsigned long claim_dma_lock(void) { unsigned long flags; spin_lock_irqsave(&dma_spin_lock, flags); return flags; } static __inline__ void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); } /* enable/disable a specific DMA channel */ static __inline__ void enable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr, DMA1_MASK_REG); else dma_outb(dmanr & 3, DMA2_MASK_REG); } static __inline__ void disable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr | 4, DMA1_MASK_REG); else dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); } /* Clear the 'DMA Pointer Flip Flop'. * Write 0 for LSB/MSB, 1 for MSB/LSB access. 
* Use this once to initialize the FF to a known state. * After that, keep track of it. :-) * --- In order to do that, the DMA routines below should --- * --- only be used while holding the DMA lock ! --- */ static __inline__ void clear_dma_ff(unsigned int dmanr) { if (dmanr<=3) dma_outb(0, DMA1_CLEAR_FF_REG); else dma_outb(0, DMA2_CLEAR_FF_REG); } /* set mode (above) for a specific DMA channel */ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) { if (dmanr<=3) dma_outb(mode | dmanr, DMA1_MODE_REG); else dma_outb(mode | (dmanr&3), DMA2_MODE_REG); } /* Set only the page register bits of the transfer address. * This is used for successive transfers when we know the contents of * the lower 16 bits of the DMA current address register, but a 64k boundary * may have been crossed. */ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) { switch(dmanr) { case 0: dma_outb(pagenr, DMA_PAGE_0); break; case 1: dma_outb(pagenr, DMA_PAGE_1); break; case 2: dma_outb(pagenr, DMA_PAGE_2); break; case 3: dma_outb(pagenr, DMA_PAGE_3); break; case 5: dma_outb(pagenr & 0xfe, DMA_PAGE_5); break; case 6: dma_outb(pagenr & 0xfe, DMA_PAGE_6); break; case 7: dma_outb(pagenr & 0xfe, DMA_PAGE_7); break; } } /* Set transfer address & page bits for specific DMA channel. * Assumes dma flipflop is clear. */ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) { set_dma_page(dmanr, a>>16); if (dmanr <= 3) { dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); } else { dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); } } /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for * a specific DMA channel. * You must ensure the parameters are valid. * NOTE: from a manual: "the number of transfers is one more * than the initial word count"! This is taken into account. * Assumes dma flip-flop is clear. * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. */ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) { count--; if (dmanr <= 3) { dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); } else { dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); } } /* Get DMA residue count. After a DMA transfer, this * should return zero. Reading this while a DMA transfer is * still in progress will return unpredictable results. * If called before the channel has been used, it may return 1. * Otherwise, it returns the number of _bytes_ left to transfer. * * Assumes DMA flip-flop is clear. */ static __inline__ int get_dma_residue(unsigned int dmanr) { unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; /* using short to get 16-bit wrap around */ unsigned short count; count = 1 + dma_inb(io_port); count += dma_inb(io_port) << 8; return (dmanr<=3)? 
count : (count<<1); } /* These are in kernel/dma.c: */ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ extern void free_dma(unsigned int dmanr); /* release it again */ /* From PCI */ #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif #endif /* _ASM_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/dmi.h000066400000000000000000000003471314037446600235160ustar00rootroot00000000000000#ifndef _ASM_DMI_H #define _ASM_DMI_H 1 #include /* Use early IO mappings for DMI because it's initialized early */ #define dmi_ioremap bt_ioremap #define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/dwarf2.h000066400000000000000000000026741314037446600241370ustar00rootroot00000000000000#ifndef _DWARF2_H #define _DWARF2_H #include #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif /* Macros for dwarf2 CFI unwind table entries. See "as.info" for details on these pseudo ops. Unfortunately they are only supported in very new binutils, so define them away for older version. */ #ifdef CONFIG_UNWIND_INFO #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc #define CFI_DEF_CFA .cfi_def_cfa #define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register #define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset #define CFI_OFFSET .cfi_offset #define CFI_REL_OFFSET .cfi_rel_offset #define CFI_REGISTER .cfi_register #define CFI_RESTORE .cfi_restore #define CFI_REMEMBER_STATE .cfi_remember_state #define CFI_RESTORE_STATE .cfi_restore_state #else /* Due to the structure of pre-exisiting code, don't use assembler line comment character # to ignore the arguments. Instead, use a dummy macro. */ .macro ignore a=0, b=0, c=0, d=0 .endm #define CFI_STARTPROC ignore #define CFI_ENDPROC ignore #define CFI_DEF_CFA ignore #define CFI_DEF_CFA_REGISTER ignore #define CFI_DEF_CFA_OFFSET ignore #define CFI_ADJUST_CFA_OFFSET ignore #define CFI_OFFSET ignore #define CFI_REL_OFFSET ignore #define CFI_REGISTER ignore #define CFI_RESTORE ignore #define CFI_REMEMBER_STATE ignore #define CFI_RESTORE_STATE ignore #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/e820.h000066400000000000000000000023601314037446600234200ustar00rootroot00000000000000/* * structures and definitions for the int 15, ax=e820 memory map * scheme. * * In a nutshell, arch/i386/boot/setup.S populates a scratch table * in the empty_zero_block that contains a list of usable address/size * duples. In arch/i386/kernel/setup.c, this information is * transferred into the e820map, and in arch/i386/mm/init.c, that * new information is used to mark pages reserved or not. 
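 *
 * Illustrative only (not in the original header): consumers of the map
 * simply walk the table built at boot, e.g.
 *
 *	int i;
 *	for (i = 0; i < e820.nr_map; i++)
 *		if (e820.map[i].type == E820_RAM)
 *			;	-- treat e820.map[i].addr .. addr+size as usable RAM
 *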
* */ #ifndef __E820_HEADER #define __E820_HEADER #define E820MAP 0x2d0 /* our map */ #define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 #define E820_RESERVED 2 #define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ #define E820_NVS 4 #define HIGH_MEMORY (1024*1024) #ifndef __ASSEMBLY__ struct e820map { int nr_map; struct e820entry { unsigned long long addr; /* start of memory segment */ unsigned long long size; /* size of memory segment */ unsigned long type; /* type of memory segment */ } map[E820MAX]; }; extern struct e820map e820; extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_any_mapped(u64 start, u64 end, unsigned type); #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/edac.h000066400000000000000000000006471314037446600236440ustar00rootroot00000000000000#ifndef ASM_EDAC_H #define ASM_EDAC_H /* ECC atomic, DMA, SMP and interrupt safe scrub function */ static __inline__ void atomic_scrub(void *va, u32 size) { unsigned long *virt_addr = va; u32 i; for (i = 0; i < size / 4; i++, virt_addr++) /* Very carefully read and write to memory atomically * so we are interrupt, DMA and SMP safe. */ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/elf.h000066400000000000000000000122411314037446600235070ustar00rootroot00000000000000#ifndef __ASMi386_ELF_H #define __ASMi386_ELF_H /* * ELF register definitions.. */ #include #include #include #include #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 #define R_386_GOT32 3 #define R_386_PLT32 4 #define R_386_COPY 5 #define R_386_GLOB_DAT 6 #define R_386_JMP_SLOT 7 #define R_386_RELATIVE 8 #define R_386_GOTOFF 9 #define R_386_GOTPC 10 #define R_386_NUM 11 typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; typedef struct user_fxsr_struct elf_fpxregset_t; /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) /* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS32 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_386 #ifdef __KERNEL__ #include #include /* for savesegment */ #include /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have been loaded before the code runs. A value of 0 tells we have no such handler. We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. */ #define ELF_PLAT_INIT(_r, load_addr) do { \ _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ _r->eax = 0; \ } while (0) #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. 
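 *
 * Worked example (illustrative, assuming the default 3G/1G split where
 * TASK_SIZE == 0xC0000000): TASK_SIZE / 3 * 2 == 0x80000000, i.e. ET_DYN
 * objects are mapped around the 2 GB mark, far above the traditional
 * 0x08048000 ET_EXEC load address and with ample room left for the brk.
 *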
*/ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different) */ #define ELF_CORE_COPY_REGS(pr_reg, regs) \ pr_reg[0] = regs->ebx; \ pr_reg[1] = regs->ecx; \ pr_reg[2] = regs->edx; \ pr_reg[3] = regs->esi; \ pr_reg[4] = regs->edi; \ pr_reg[5] = regs->ebp; \ pr_reg[6] = regs->eax; \ pr_reg[7] = regs->xds; \ pr_reg[8] = regs->xes; \ savesegment(fs,pr_reg[9]); \ savesegment(gs,pr_reg[10]); \ pr_reg[11] = regs->orig_eax; \ pr_reg[12] = regs->eip; \ pr_reg[13] = regs->xcs; \ pr_reg[14] = regs->eflags; \ pr_reg[15] = regs->esp; \ pr_reg[16] = regs->xss; /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in intent than poking at uname or /proc/cpuinfo. For the moment, we have only optimizations for the Intel generations, but that could change... */ #define ELF_PLATFORM (system_utsname.machine) #define SET_PERSONALITY(ex, ibcs2) do { } while (0) /* * An executable for which elf_read_implies_exec() returns TRUE will * have the READ_IMPLIES_EXEC personality flag set automatically. */ #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) struct task_struct; extern int dump_task_regs (struct task_struct *, elf_gregset_t *); extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *); #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) #define VDSO_BASE ((unsigned long)current->mm->context.vdso) #ifdef CONFIG_COMPAT_VDSO # define VDSO_COMPAT_BASE VDSO_HIGH_BASE # define VDSO_PRELINK VDSO_HIGH_BASE #else # define VDSO_COMPAT_BASE VDSO_BASE # define VDSO_PRELINK 0 #endif #define VDSO_COMPAT_SYM(x) \ (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK) #define VDSO_SYM(x) \ (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK) #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) #define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE) extern void __kernel_vsyscall; #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); extern unsigned int vdso_enabled; #define ARCH_DLINFO \ do if (vdso_enabled) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \ } while (0) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/emergency-restart.h000066400000000000000000000002271314037446600264020ustar00rootroot00000000000000#ifndef _ASM_EMERGENCY_RESTART_H #define _ASM_EMERGENCY_RESTART_H extern void machine_emergency_restart(void); #endif /* _ASM_EMERGENCY_RESTART_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/errno.h000066400000000000000000000001241314037446600240630ustar00rootroot00000000000000#ifndef _I386_ERRNO_H #define _I386_ERRNO_H #include #endif 
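/*
 * Illustrative companion snippet -- not part of the RHEL5 archive above.
 * elf.h's ARCH_DLINFO publishes the vsyscall entry (AT_SYSINFO) and the
 * vDSO ELF header address (AT_SYSINFO_EHDR) in the ELF auxiliary vector;
 * a userspace program can read them back with glibc's getauxval().  The
 * printed values depend on the running kernel and may be 0 when no vDSO
 * is exported.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vsyscall = getauxval(AT_SYSINFO);      /* __kernel_vsyscall entry */
	unsigned long vdso     = getauxval(AT_SYSINFO_EHDR); /* base of the vDSO image  */

	printf("AT_SYSINFO      = %#lx\n", vsyscall);
	printf("AT_SYSINFO_EHDR = %#lx\n", vdso);
	return 0;
}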
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/fcntl.h000066400000000000000000000000371314037446600240470ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/fixmap.h000066400000000000000000000106701314037446600242310ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H /* used by vmalloc.c, vsyscall.lds.S. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. */ extern unsigned long __FIXADDR_TOP; #ifndef __ASSEMBLY__ #include #include #include #include #ifdef CONFIG_HIGHMEM #include #include #endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. We allocate these special addresses * from the end of virtual memory (0xfffff000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. */ enum fixed_addresses { FIX_HOLE, FIX_VDSO, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif #ifdef CONFIG_X86_F00F_BUG FIX_F00F_IDT, /* Virtual mapping for IDT */ #endif #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ #endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif #ifdef CONFIG_ACPI FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, #endif #ifdef CONFIG_PCI_MMCONFIG FIX_PCIE_MCFG, #endif __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, FIX_WP_TEST, __end_of_fixed_addresses }; extern void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags); extern void set_fixaddr_top(unsigned long top); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. 
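 * (Illustrative, not in the original header: the local APIC is the classic
 *  case, mapped with something like set_fixmap_nocache(FIX_APIC_BASE, apic_phys),
 *  where apic_phys stands in for the APIC's physical base address.)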
*/ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without tranlation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. */ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) { BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/floppy.h000066400000000000000000000150141314037446600242530ustar00rootroot00000000000000/* * Architecture specific parts of the Floppy driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 */ #ifndef __ASM_I386_FLOPPY_H #define __ASM_I386_FLOPPY_H #include /* * The DMA channel used by the floppy controller cannot access data at * addresses >= 16MB * * Went back to the 1MB limit, as some people had problems with the floppy * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. 
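 *
 * Worked example (illustrative, not in the original header): with ordinary
 * ISA DMA a 1 KiB transfer starting at physical 0x0FFF0 would straddle a
 * 64 KiB boundary (0x0FFF0/64K == 0 but (0x0FFF0 + 1024 - 1)/64K == 1),
 * which is exactly what the CROSS_64KB() check below catches.
 *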
*/ #define _CROSS_64KB(a,s,vdma) \ (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) #define SW fd_routine[use_virtual_dma&1] #define CSW fd_routine[can_use_virtual_dma & 1] #define fd_inb(port) inb_p(port) #define fd_outb(value,port) outb_p(value,port) #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) #define FLOPPY_CAN_FALLBACK_ON_NODMA static int virtual_dma_count; static int virtual_dma_residue; static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) { register unsigned char st; #undef TRACE_FLPY_INT #ifdef TRACE_FLPY_INT static int calls=0; static int bytes=0; static int dma_wait=0; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id, regs); #ifdef TRACE_FLPY_INT if(!calls) bytes = virtual_dma_count; #endif { register int lcount; register char *lptr; st = 1; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { st=inb(virtual_dma_port+4) & 0xa0 ; if(st != 0xa0) break; if(virtual_dma_mode) outb_p(*lptr, virtual_dma_port+5); else *lptr = inb_p(virtual_dma_port+5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; st = inb(virtual_dma_port+4); } #ifdef TRACE_FLPY_INT calls++; #endif if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; dma_wait=0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id, regs); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT if(!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; } static void fd_disable_dma(void) { if(! 
(can_use_virtual_dma & 1)) disable_dma(FLOPPY_DMA); doing_pdma = 0; virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; } static int vdma_request_dma(unsigned int dmanr, const char * device_id) { return 0; } static void vdma_nop(unsigned int dummy) { } static int vdma_get_dma_residue(unsigned int dummy) { return virtual_dma_count + virtual_dma_residue; } static int fd_request_irq(void) { if(can_use_virtual_dma) return request_irq(FLOPPY_IRQ, floppy_hardint, IRQF_DISABLED, "floppy", NULL); else return request_irq(FLOPPY_IRQ, floppy_interrupt, IRQF_DISABLED, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) { return __get_dma_pages(GFP_KERNEL,get_order(size)); } static unsigned long vdma_mem_alloc(unsigned long size) { return (unsigned long) vmalloc(size); } #define nodma_mem_alloc(size) vdma_mem_alloc(size) static void _fd_dma_mem_free(unsigned long addr, unsigned long size) { if((unsigned int) addr >= (unsigned int) high_memory) vfree((void *)addr); else free_pages(addr, get_order(size)); } #define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) static void _fd_chose_dma_mode(char *addr, unsigned long size) { if(can_use_virtual_dma == 2) { if((unsigned int) addr >= (unsigned int) high_memory || isa_virt_to_bus(addr) >= 0x1000000 || _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else use_virtual_dma = 0; } else { use_virtual_dma = can_use_virtual_dma & 1; } } #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; return 0; } static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) { #ifdef FLOPPY_SANITY_CHECK if (CROSS_64KB(addr, size)) { printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); return -1; } #endif /* actual, physical DMA */ doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA,mode); set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); set_dma_count(FLOPPY_DMA,size); enable_dma(FLOPPY_DMA); return 0; } static struct fd_routine_l { int (*_request_dma)(unsigned int dmanr, const char * device_id); void (*_free_dma)(unsigned int dmanr); int (*_get_dma_residue)(unsigned int dummy); unsigned long (*_dma_mem_alloc) (unsigned long size); int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { request_dma, free_dma, get_dma_residue, dma_mem_alloc, hard_dma_setup }, { vdma_request_dma, vdma_nop, vdma_get_dma_residue, vdma_mem_alloc, vdma_dma_setup } }; static int FDC1 = 0x3f0; static int FDC2 = -1; /* * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
*/ #define FLOPPY0_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = (CMOS_READ(0x10) >> 4) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define FLOPPY1_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = CMOS_READ(0x10) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define N_FDC 2 #define N_DRIVE 8 #define FLOPPY_MOTOR_MASK 0xf0 #define AUTO_DMA #define EXTRA_FLOPPY_PARAMS #endif /* __ASM_I386_FLOPPY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/futex.h000066400000000000000000000060441314037446600241000ustar00rootroot00000000000000#ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #ifdef __KERNEL__ #include #include #include #include #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: " insn "\n" \ "2: .section .fixup,\"ax\"\n\ 3: mov %3, %1\n\ jmp 2b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .long 1b,3b\n\ .previous" \ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: movl %2, %0\n\ movl %0, %3\n" \ insn "\n" \ "2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ jnz 1b\n\ 3: .section .fixup,\"ax\"\n\ 4: mov %5, %1\n\ jmp 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .long 1b,4b,2b,4b\n\ .previous" \ : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; inc_preempt_count(); if (op == FUTEX_OP_SET) __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); else { #ifndef CONFIG_X86_BSWAP if (boot_cpu_data.x86 == 3) ret = -ENOSYS; else #endif switch (op) { case FUTEX_OP_ADD: __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); break; case FUTEX_OP_XOR: __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); break; default: ret = -ENOSYS; } } dec_preempt_count(); if (!ret) { switch (cmp) { case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; default: ret = -ENOSYS; } } return ret; } static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__( "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" "2: .section .fixup, \"ax\" \n" "3: mov %2, %0 \n" " jmp 2b \n" " .previous \n" " .section __ex_table, \"a\" \n" " .align 8 \n" " .long 1b,3b \n" " .previous \n" : "=a" (oldval), "+m" (*uaddr) : "i" (-EFAULT), "r" (newval), "0" (oldval) : "memory" ); return oldval; } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/genapic.h000066400000000000000000000065231314037446600243550ustar00rootroot00000000000000#ifndef _ASM_GENAPIC_H #define _ASM_GENAPIC_H 1 /* * Generic APIC driver interface. * * An straight forward mapping of the APIC related parts of the * x86 subarchitecture interface to a dynamic object. * * This is used by the "generic" x86 subarchitecture. * * Copyright 2003 Andi Kleen, SuSE Labs. */ struct mpc_config_translation; struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; struct genapic { char *name; int (*probe)(void); int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); int int_delivery_mode; int int_dest_mode; int ESR_DISABLE; int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); int no_balance_irq; int no_ioapic_check; void (*init_apic_ldr)(void); physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); void (*clustered_apic_check)(void); int (*multi_timer_check)(int apic, int irq); int (*apicid_to_node)(int logical_apicid); int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); int (*mpc_apic_id)(struct mpc_config_processor *m, struct mpc_config_translation *t); void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, struct mpc_config_translation *); void (*mpc_oem_pci_bus)(struct mpc_config_bus *, struct mpc_config_translation *); /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. 
*/ int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); /* ipi */ void (*send_IPI_mask)(cpumask_t mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); }; #define APICFUNC(x) .x = x #define APIC_INIT(aname, aprobe) { \ .name = aname, \ .probe = aprobe, \ .int_delivery_mode = INT_DELIVERY_MODE, \ .int_dest_mode = INT_DEST_MODE, \ .no_balance_irq = NO_BALANCE_IRQ, \ .ESR_DISABLE = esr_disable, \ .apic_destination_logical = APIC_DEST_LOGICAL, \ APICFUNC(apic_id_registered), \ APICFUNC(target_cpus), \ APICFUNC(check_apicid_used), \ APICFUNC(check_apicid_present), \ APICFUNC(init_apic_ldr), \ APICFUNC(ioapic_phys_id_map), \ APICFUNC(clustered_apic_check), \ APICFUNC(multi_timer_check), \ APICFUNC(apicid_to_node), \ APICFUNC(cpu_to_logical_apicid), \ APICFUNC(cpu_present_to_apicid), \ APICFUNC(apicid_to_cpu_present), \ APICFUNC(mpc_apic_id), \ APICFUNC(setup_portio_remap), \ APICFUNC(check_phys_apicid_present), \ APICFUNC(mpc_oem_bus_info), \ APICFUNC(mpc_oem_pci_bus), \ APICFUNC(mps_oem_check), \ APICFUNC(get_apic_id), \ .apic_id_mask = APIC_ID_MASK, \ APICFUNC(cpu_mask_to_apicid), \ APICFUNC(acpi_madt_oem_check), \ APICFUNC(send_IPI_mask), \ APICFUNC(send_IPI_allbutself), \ APICFUNC(send_IPI_all), \ APICFUNC(enable_apic_mode), \ APICFUNC(phys_pkg_id), \ } extern struct genapic *genapic; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/hardirq.h000066400000000000000000000011071314037446600243720ustar00rootroot00000000000000#ifndef __ASM_HARDIRQ_H #define __ASM_HARDIRQ_H #include #include typedef struct { unsigned int __softirq_pending; unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ unsigned int apic_timer_irqs; /* arch dependent */ } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU(irq_cpustat_t, irq_stat); extern irq_cpustat_t irq_stat[]; #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) void ack_bad_irq(unsigned int irq); #include #endif /* __ASM_HARDIRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/highmem.h000066400000000000000000000040271314037446600243620ustar00rootroot00000000000000/* * highmem.h: virtual kernel memory mappings for high memory * * Used in CONFIG_HIGHMEM systems for memory pages which * are not addressable by direct kernel virtual addresses. * * Copyright (C) 1999 Gerhard Wichert, Siemens AG * Gerhard.Wichert@pdb.siemens.de * * * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * * Copyright (C) 1999 Ingo Molnar */ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H #ifdef __KERNEL__ #include #include #include #include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; extern pte_t *kmap_pte; extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; /* * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
*/ #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else #define LAST_PKMAP 1024 #endif /* * Ordering is: * * FIXADDR_TOP * fixed_addresses * FIXADDR_START * temp fixed addresses * FIXADDR_BOOT_START * Persistent kmap area * PKMAP_BASE * VMALLOC_END * Vmalloc area * VMALLOC_START * high_memory */ #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) extern void * FASTCALL(kmap_high(struct page *page)); extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/hpet.h000066400000000000000000000063211314037446600237030ustar00rootroot00000000000000 #ifndef _I386_HPET_H #define _I386_HPET_H #ifdef CONFIG_HPET_TIMER #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Documentation on HPET can be found at: * http://www.intel.com/ial/home/sp/pcmmspec.htm * ftp://download.intel.com/ial/home/sp/mmts098.pdf */ #define HPET_MMAP_SIZE 1024 #define HPET_ID 0x000 #define HPET_PERIOD 0x004 #define HPET_CFG 0x010 #define HPET_STATUS 0x020 #define HPET_COUNTER 0x0f0 #define HPET_T0_CFG 0x100 #define HPET_T0_CMP 0x108 #define HPET_T0_ROUTE 0x110 #define HPET_T1_CFG 0x120 #define HPET_T1_CMP 0x128 #define HPET_T1_ROUTE 0x130 #define HPET_T2_CFG 0x140 #define HPET_T2_CMP 0x148 #define HPET_T2_ROUTE 0x150 #define HPET_ID_LEGSUP 0x00008000 #define HPET_ID_NUMBER 0x00001f00 #define HPET_ID_REV 0x000000ff #define HPET_ID_NUMBER_SHIFT 8 #define HPET_CFG_ENABLE 0x001 #define HPET_CFG_LEGACY 0x002 #define HPET_LEGACY_8254 2 #define HPET_LEGACY_RTC 8 #define HPET_TN_ENABLE 0x004 #define HPET_TN_PERIODIC 0x008 #define HPET_TN_PERIODIC_CAP 0x010 #define HPET_TN_SETVAL 0x040 #define HPET_TN_32BIT 0x100 /* Use our own asm for 64 bit multiply/divide */ #define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \ __asm__ __volatile__("mull %2" \ :"=a" (eax_out), "=d" (edx_out) \ :"r" (reg_in), "0" (eax_in)) #define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \ __asm__ __volatile__("divl %2" \ :"=a" (eax_out), "=d" (edx_out) \ :"r" (reg_in), "0" (eax_in), "1" (edx_in)) #define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */ /* Max HPET Period is 10^8 femto sec as in HPET spec */ #define HPET_MAX_PERIOD (100000000UL) /* * Min HPET period is 10^5 femto sec just for safety. If it is less than this, * then 32 bit HPET counter wrapsaround in less than 0.5 sec. 
*/ #define HPET_MIN_PERIOD (100000UL) #define HPET_TICK_RATE (HZ * 100000UL) extern unsigned long hpet_tick; /* hpet clks count per tick */ extern unsigned long hpet_address; /* hpet memory map physical address */ extern int hpet_use_timer; extern int hpet_rtc_timer_init(void); extern int hpet_enable(void); extern int hpet_reenable(void); extern int is_hpet_enabled(void); extern int is_hpet_capable(void); extern int hpet_readl(unsigned long a); #ifdef CONFIG_HPET_EMULATE_RTC extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); extern int hpet_set_periodic_freq(unsigned long freq); extern int hpet_rtc_dropped_irq(void); extern int hpet_rtc_timer_init(void); extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs); #endif /* CONFIG_HPET_EMULATE_RTC */ #endif /* CONFIG_HPET_TIMER */ #endif /* _I386_HPET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/hw_irq.h000066400000000000000000000034531314037446600242370ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old arch/i386/kernel/irq.h to here. VY * * IRQ/IPI changes taken from work by Thomas Radke * */ #include #include #include #include struct hw_interrupt_type; #define NMI_VECTOR 0x02 /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 extern void (*interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP fastcall void reschedule_interrupt(void); fastcall void invalidate_interrupt(void); fastcall void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC fastcall void apic_timer_interrupt(void); fastcall void error_interrupt(void); fastcall void spurious_interrupt(void); fastcall void thermal_interrupt(struct pt_regs *); #define platform_legacy_irq(irq) ((irq) < 16) #endif void disable_8259A_irq(unsigned int irq); void enable_8259A_irq(unsigned int irq); int i8259A_irq_pending(unsigned int irq); void make_8259A_irq(unsigned int irq); void init_8259A(int aeoi); void FASTCALL(send_IPI_self(int vector)); void init_VISWS_APIC_irqs(void); void setup_IO_APIC(void); void disable_IO_APIC(void); void print_IO_APIC(void); int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); void send_IPI(int dest, int vector); void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #endif /* _ASM_HW_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/i387.h000066400000000000000000000072761314037446600234470ustar00rootroot00000000000000/* * include/asm-i386/i387.h * * Copyright (C) 1994 Linus Torvalds * * Pentium III FXSR, SSE support * General FPU state handling cleanups * Gareth Hughes , May 2000 */ #ifndef __ASM_I386_I387_H #define __ASM_I386_I387_H #include #include #include #include #include #include extern void mxcsr_feature_mask_init(void); extern void init_fpu(struct task_struct *); /* * FPU lazy state save handling... */ /* * The "nop" is needed to make the instructions the same * length. 
*/ #define restore_fpu(tsk) \ alternative_input( \ "nop ; frstor %1", \ "fxrstor %1", \ X86_FEATURE_FXSR, \ "m" ((tsk)->thread.i387.fxsave)) extern void kernel_fpu_begin(void); #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) /* We need a safe address that is cheap to find and that is already in L1 during context switch. The best choices are unfortunately different for UP and SMP */ #ifdef CONFIG_SMP #define safe_address (__per_cpu_offset[0]) #else #define safe_address (kstat_cpu(0).cpustat.user) #endif /* * These must be called with preempt disabled */ static inline void __save_init_fpu( struct task_struct *tsk ) { /* Use more nops than strictly needed in case the compiler varies code */ alternative_input( "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, "fxsave %[fx]\n" "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", X86_FEATURE_FXSR, [fx] "m" (tsk->thread.i387.fxsave), [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. safe_address is a random variable that should be in L1 */ alternative_input( GENERIC_NOP8 GENERIC_NOP2, "emms\n\t" /* clear stack tags */ "fildl %[addr]", /* set F?P to defined value */ X86_FEATURE_FXSAVE_LEAK, [addr] "m" (safe_address)); task_thread_info(tsk)->status &= ~TS_USEDFPU; } #define __unlazy_fpu( tsk ) do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) \ save_init_fpu( tsk ); \ } while (0) #define __clear_fpu( tsk ) \ do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) { \ asm volatile("fnclex ; fwait"); \ task_thread_info(tsk)->status &= ~TS_USEDFPU; \ stts(); \ } \ } while (0) /* * These disable preemption on their own and are safe */ static inline void save_init_fpu( struct task_struct *tsk ) { preempt_disable(); __save_init_fpu(tsk); stts(); preempt_enable(); } #define unlazy_fpu( tsk ) do { \ preempt_disable(); \ __unlazy_fpu(tsk); \ preempt_enable(); \ } while (0) #define clear_fpu( tsk ) do { \ preempt_disable(); \ __clear_fpu( tsk ); \ preempt_enable(); \ } while (0) \ /* * FPU state interaction... */ extern unsigned short get_fpu_cwd( struct task_struct *tsk ); extern unsigned short get_fpu_swd( struct task_struct *tsk ); extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); /* * Signal frame handlers... */ extern int save_i387( struct _fpstate __user *buf ); extern int restore_i387( struct _fpstate __user *buf ); /* * ptrace request handers... */ extern int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk ); extern int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf ); extern int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk ); extern int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf ); /* * FPU state for core dumps... 
*/ extern int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu ); #endif /* __ASM_I386_I387_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/i8253.h000066400000000000000000000001551314037446600235140ustar00rootroot00000000000000#ifndef __ASM_I8253_H__ #define __ASM_I8253_H__ extern spinlock_t i8253_lock; #endif /* __ASM_I8253_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/i8259.h000066400000000000000000000007711314037446600235260ustar00rootroot00000000000000#ifndef __ASM_I8259_H__ #define __ASM_I8259_H__ extern unsigned int cached_irq_mask; #define __byte(x,y) (((unsigned char *) &(y))[x]) #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) extern spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); extern void disable_8259A_irq(unsigned int irq); extern unsigned int startup_8259A_irq(unsigned int irq); #endif /* __ASM_I8259_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ide.h000066400000000000000000000030201314037446600234750ustar00rootroot00000000000000/* * linux/include/asm-i386/ide.h * * Copyright (C) 1994-1996 Linus Torvalds & authors */ /* * This file contains the i386 architecture specific IDE code. */ #ifndef __ASMi386_IDE_H #define __ASMi386_IDE_H #ifdef __KERNEL__ #ifndef MAX_HWIFS # ifdef CONFIG_BLK_DEV_IDEPCI #define MAX_HWIFS 10 # else #define MAX_HWIFS 6 # endif #endif #define IDE_ARCH_OBSOLETE_DEFAULTS static __inline__ int ide_default_irq(unsigned long base) { switch (base) { case 0x1f0: return 14; case 0x170: return 15; case 0x1e8: return 11; case 0x168: return 10; case 0x1e0: return 8; case 0x160: return 12; default: return 0; } } static __inline__ unsigned long ide_default_io_base(int index) { /* * If PCI is present then it is not safe to poke around * the other legacy IDE ports. Only 0x1f0 and 0x170 are * defined compatibility mode ports for PCI. A user can * override this using ide= but we must default safe. 
*/ if (pci_find_device(PCI_ANY_ID, PCI_ANY_ID, NULL) == NULL) { switch(index) { case 2: return 0x1e8; case 3: return 0x168; case 4: return 0x1e0; case 5: return 0x160; } } switch (index) { case 0: return 0x1f0; case 1: return 0x170; default: return 0; } } #define IDE_ARCH_OBSOLETE_INIT #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ #ifdef CONFIG_BLK_DEV_IDEPCI #define ide_init_default_irq(base) (0) #else #define ide_init_default_irq(base) ide_default_irq(base) #endif #include #endif /* __KERNEL__ */ #endif /* __ASMi386_IDE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/intel_arch_perfmon.h000066400000000000000000000012231314037446600265750ustar00rootroot00000000000000#ifndef X86_INTEL_ARCH_PERFMON_H #define X86_INTEL_ARCH_PERFMON_H 1 #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 #define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0) #endif /* X86_INTEL_ARCH_PERFMON_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/io.h000066400000000000000000000251241314037446600233540ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). * * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. */ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define IO_SPACE_LIMIT 0xffff #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ #ifdef __KERNEL__ #include #include /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p /** * virt_to_phys - map virtual addresses to physical * @address: address to remap * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. 
In * almost all conceivable cases a device driver should not be using * this function */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } /** * phys_to_virt - map physical address to virtual * @address: address to remap * * The returned virtual address is a current CPU mapping for * the memory address given. It is only valid to use this function on * addresses that have a kernel mapping * * This function does not handle bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline void * phys_to_virt(unsigned long address) { return __va(address); } /* * Change "struct page" to physical address. */ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * ioremap performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. */ static inline void __iomem * ioremap(unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* * bt_ioremap() and bt_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. * A boot-time mapping is currently limited to at most 16 pages. */ extern void *bt_ioremap(unsigned long offset, unsigned long size); extern void bt_iounmap(void *addr, unsigned long size); /* Use early IO mappings for DMI because it's initialized early */ #define dmi_ioremap bt_ioremap #define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus virt_to_phys #define isa_page_to_bus page_to_phys #define isa_bus_to_virt phys_to_virt /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. 
*/ static inline unsigned char readb(const volatile void __iomem *addr) { return *(volatile unsigned char __force *) addr; } static inline unsigned short readw(const volatile void __iomem *addr) { return *(volatile unsigned short __force *) addr; } static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl static inline void writeb(unsigned char b, volatile void __iomem *addr) { *(volatile unsigned char __force *) addr = b; } static inline void writew(unsigned short b, volatile void __iomem *addr) { *(volatile unsigned short __force *) addr = b; } static inline void writel(unsigned int b, volatile void __iomem *addr) { *(volatile unsigned int __force *) addr = b; } #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define mmiowb() static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { memset((void __force *) addr, val, count); } static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) { __memcpy(dst, (void __force *) src, count); } static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) { __memcpy((void __force *) dst, src, count); } /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) /* * Again, i386 does not require mem IO specific function. */ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(volatile void __iomem * io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* * Cache management * * This needed for two cases * 1. Out of order aware processors * 2. 
Accidentally out of order processors (PPro errata #51) */ #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); } #define dma_cache_inv(_start,_size) flush_write_buffers() #define dma_cache_wback(_start,_size) flush_write_buffers() #define dma_cache_wback_inv(_start,_size) flush_write_buffers() #else /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() #endif #endif /* __KERNEL__ */ #ifdef SLOW_IO_BY_JUMPING #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" #else #define __SLOW_DOWN_IO "outb %%al,$0x80;" #endif static inline void slow_down_io(void) { __asm__ __volatile__( __SLOW_DOWN_IO #ifdef REALLY_SLOW_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO #endif : : ); } #ifdef CONFIG_X86_NUMAQ extern void *xquad_portio; /* Where the IO area was mapped */ #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ if (xquad_portio) \ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ else \ out##bwl##_local(value, port); \ } \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_quad(value, port, 0); \ } \ static inline unsigned type in##bwl##_quad(int port, int quad) { \ if (xquad_portio) \ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ else \ return in##bwl##_local(port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_quad(port, 0); \ } #else #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_local(value, port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_local(port); \ } #endif #define BUILDIO(bwl,bw,type) \ static inline void out##bwl##_local(unsigned type value, int port) { \ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ } \ static inline unsigned type in##bwl##_local(int port) { \ unsigned type value; \ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ return value; \ } \ static inline void out##bwl##_local_p(unsigned type value, int port) { \ out##bwl##_local(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_local_p(int port) { \ unsigned type value = in##bwl##_local(port); \ slow_down_io(); \ return value; \ } \ __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_p(unsigned type value, int port) { \ out##bwl(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_p(int port) { \ unsigned type value = in##bwl(port); \ slow_down_io(); \ return value; \ } \ static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ } \ static inline void ins##bwl(int port, void *addr, unsigned long count) { \ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ } BUILDIO(b,b,char) BUILDIO(w,w,short) BUILDIO(l,,int) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/io_apic.h000066400000000000000000000130761314037446600243530ustar00rootroot00000000000000#ifndef __ASM_IO_APIC_H #define __ASM_IO_APIC_H #include #include /* * Intel IO-APIC support for SMP and UP systems. 
* * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar */ #ifdef CONFIG_X86_IO_APIC #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN) static inline int use_pci_vector(void) {return 1;} static inline void disable_edge_ioapic_vector(unsigned int vector) { } static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { } static inline void end_edge_ioapic_vector (unsigned int vector) { } #define startup_level_ioapic startup_level_ioapic_vector #define shutdown_level_ioapic mask_IO_APIC_vector #define enable_level_ioapic unmask_IO_APIC_vector #define disable_level_ioapic mask_IO_APIC_vector #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector #define end_level_ioapic end_level_ioapic_vector #define set_ioapic_affinity set_ioapic_affinity_vector #define startup_edge_ioapic startup_edge_ioapic_vector #define shutdown_edge_ioapic disable_edge_ioapic_vector #define enable_edge_ioapic unmask_IO_APIC_vector #define disable_edge_ioapic disable_edge_ioapic_vector #define ack_edge_ioapic ack_edge_ioapic_vector #define end_edge_ioapic end_edge_ioapic_vector #else static inline int use_pci_vector(void) {return 0;} static inline void disable_edge_ioapic_irq(unsigned int irq) { } static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { } static inline void end_edge_ioapic_irq (unsigned int irq) { } #define startup_level_ioapic startup_level_ioapic_irq #define shutdown_level_ioapic mask_IO_APIC_irq #define enable_level_ioapic unmask_IO_APIC_irq #define disable_level_ioapic mask_IO_APIC_irq #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq #define end_level_ioapic end_level_ioapic_irq #define set_ioapic_affinity set_ioapic_affinity_irq #define startup_edge_ioapic startup_edge_ioapic_irq #define shutdown_edge_ioapic disable_edge_ioapic_irq #define enable_edge_ioapic unmask_IO_APIC_irq #define disable_edge_ioapic disable_edge_ioapic_irq #define ack_edge_ioapic ack_edge_ioapic_irq #define end_edge_ioapic end_edge_ioapic_irq #endif #define IO_APIC_BASE(idx) \ ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) /* * The structure of the IO-APIC: */ union IO_APIC_reg_00 { u32 raw; struct { u32 __reserved_2 : 14, LTS : 1, delivery_type : 1, __reserved_1 : 8, ID : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_01 { u32 raw; struct { u32 version : 8, __reserved_2 : 7, PRQ : 1, entries : 8, __reserved_1 : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_02 { u32 raw; struct { u32 __reserved_2 : 24, arbitration : 4, __reserved_1 : 4; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_03 { u32 raw; struct { u32 boot_DT : 1, __reserved_1 : 31; } __attribute__ ((packed)) bits; }; /* * # of IO-APICs and # of IRQ routing registers */ extern int nr_ioapics; extern int nr_ioapic_registers[MAX_IO_APICS]; enum ioapic_irq_destination_types { dest_Fixed = 0, dest_LowestPrio = 1, dest_SMI = 2, dest__reserved_1 = 3, dest_NMI = 4, dest_INIT = 5, dest__reserved_2 = 6, dest_ExtINT = 7 }; struct IO_APIC_route_entry { __u32 vector : 8, delivery_mode : 3, /* 000: FIXED * 001: lowest prio * 111: ExtINT */ dest_mode : 1, /* 0: physical, 1: logical */ delivery_status : 1, polarity : 1, irr : 1, trigger : 1, /* 0: edge, 1: level */ mask : 1, /* 0: enabled, 1: disabled */ __reserved_2 : 15; union { struct { __u32 __reserved_1 : 24, physical_dest : 4, __reserved_2 : 4; } physical; struct { __u32 __reserved_1 : 24, logical_dest : 8; } logical; } dest; } __attribute__ ((packed)); /* * MP-BIOS irq configuration table 
structures: */ /* I/O APIC entries */ extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; /* # of MP IRQ source entries */ extern int mp_irq_entries; /* MP IRQ source entries */ extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* non-0 if default (table-less) MP configuration */ extern int mpc_default_type; static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) { *IO_APIC_BASE(apic) = reg; return *(IO_APIC_BASE(apic)+4); } static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) { *IO_APIC_BASE(apic) = reg; *(IO_APIC_BASE(apic)+4) = value; } /* * Re-write a value: to be used for read-modify-write * cycles where the read already set up the index register. * * Older SiS APIC requires we rewrite the index regiser */ extern int sis_apic_bug; static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) { if (sis_apic_bug) *IO_APIC_BASE(apic) = reg; *(IO_APIC_BASE(apic)+4) = value; } /* 1 if "noapic" boot option passed */ extern int skip_ioapic_setup; /* * If we use the IO-APIC for IRQ routing, disable automatic * assignment of PCI IRQ's. */ #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) #ifdef CONFIG_ACPI extern int io_apic_get_unique_id (int ioapic, int apic_id); extern int io_apic_get_version (int ioapic); extern int io_apic_get_redir_entries (int ioapic); extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); extern int timer_uses_ioapic_pin_0; #endif /* CONFIG_ACPI */ extern int (*ioapic_renumber_irq)(int ioapic, int irq); #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 #endif extern int assign_irq_vector(int irq); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ioctl.h000066400000000000000000000000371314037446600240530ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ioctls.h000066400000000000000000000050421314037446600242370ustar00rootroot00000000000000#ifndef __ARCH_I386_IOCTLS_H__ #define __ARCH_I386_IOCTLS_H__ #include /* 0x54 is just a magic number to make these relatively unique ('T') */ #define TCGETS 0x5401 #define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ #define TCSETSW 0x5403 #define TCSETSF 0x5404 #define TCGETA 0x5405 #define TCSETA 0x5406 #define TCSETAW 0x5407 #define TCSETAF 0x5408 #define TCSBRK 0x5409 #define TCXONC 0x540A #define TCFLSH 0x540B #define TIOCEXCL 0x540C #define TIOCNXCL 0x540D #define TIOCSCTTY 0x540E #define TIOCGPGRP 0x540F #define TIOCSPGRP 0x5410 #define TIOCOUTQ 0x5411 #define TIOCSTI 0x5412 #define TIOCGWINSZ 0x5413 #define TIOCSWINSZ 0x5414 #define TIOCMGET 0x5415 #define TIOCMBIS 0x5416 #define TIOCMBIC 0x5417 #define TIOCMSET 0x5418 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A #define FIONREAD 0x541B #define TIOCINQ FIONREAD #define TIOCLINUX 0x541C #define TIOCCONS 0x541D #define TIOCGSERIAL 0x541E #define TIOCSSERIAL 0x541F #define TIOCPKT 0x5420 #define FIONBIO 0x5421 #define TIOCNOTTY 0x5422 #define TIOCSETD 0x5423 #define TIOCGETD 0x5424 #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ /* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* 
Lock/unlock Pty */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 #define FIOASYNC 0x5452 #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 #define TIOCSERSWILD 0x5455 #define TIOCGLCKTRMIOS 0x5456 #define TIOCSLCKTRMIOS 0x5457 #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ #define TIOCSERGETLSR 0x5459 /* Get line status register */ #define TIOCSERGETMULTI 0x545A /* Get multiport config */ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 #define TIOCPKT_FLUSHREAD 1 #define TIOCPKT_FLUSHWRITE 2 #define TIOCPKT_STOP 4 #define TIOCPKT_START 8 #define TIOCPKT_NOSTOP 16 #define TIOCPKT_DOSTOP 32 #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ipc.h000066400000000000000000000000351314037446600235120ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ipcbuf.h000066400000000000000000000011641314037446600242130ustar00rootroot00000000000000#ifndef __i386_IPCBUF_H__ #define __i386_IPCBUF_H__ /* * The ipc64_perm structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 32-bit mode_t and seq * - 2 miscellaneous 32-bit values */ struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; __kernel_uid32_t cuid; __kernel_gid32_t cgid; __kernel_mode_t mode; unsigned short __pad1; unsigned short seq; unsigned short __pad2; unsigned long __unused1; unsigned long __unused2; }; #endif /* __i386_IPCBUF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/irq.h000066400000000000000000000016571314037446600235450ustar00rootroot00000000000000#ifndef _ASM_IRQ_H #define _ASM_IRQ_H /* * linux/include/asm/irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * IRQ/IPI changes taken from work by Thomas Radke * */ #include /* include comes from machine specific directory */ #include "irq_vectors.h" #include static __inline__ int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); } #ifdef CONFIG_X86_LOCAL_APIC # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ #endif #ifdef CONFIG_4KSTACKS extern void irq_ctx_init(int cpu); extern void irq_ctx_exit(int cpu); # define __ARCH_HAS_DO_SOFTIRQ #else # define irq_ctx_init(cpu) do { } while (0) # define irq_ctx_exit(cpu) do { } while (0) #endif #ifdef CONFIG_IRQBALANCE extern int irqbalance_disable(char *str); #endif #ifdef CONFIG_HOTPLUG_CPU extern void fixup_irqs(cpumask_t map); #endif #endif /* _ASM_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/irqflags.h000066400000000000000000000045061314037446600245560ustar00rootroot00000000000000/* * include/asm-i386/irqflags.h * * IRQ flags handling * * This file gets included from lowlevel asm headers too, to provide * wrapped versions of the local_irq_*() APIs, based on the * raw_local_irq_*() functions from the lowlevel headers. 
*/ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H #ifndef __ASSEMBLY__ static inline unsigned long __raw_local_save_flags(void) { unsigned long flags; __asm__ __volatile__( "pushfl ; popl %0" : "=g" (flags) : /* no input */ ); return flags; } #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) static inline void raw_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "pushl %0 ; popfl" : /* no output */ :"g" (flags) :"memory", "cc" ); } static inline void raw_local_irq_disable(void) { __asm__ __volatile__("cli" : : : "memory"); } static inline void raw_local_irq_enable(void) { __asm__ __volatile__("sti" : : : "memory"); } /* * Used in the idle loop; sti takes one instruction cycle * to complete: */ static inline void raw_safe_halt(void) { __asm__ __volatile__("sti; hlt" : : : "memory"); } /* * Used when interrupts are already enabled or to * shutdown the processor: */ static inline void halt(void) { __asm__ __volatile__("hlt": : :"memory"); } static inline int raw_irqs_disabled_flags(unsigned long flags) { return !(flags & (1 << 9)); } static inline int raw_irqs_disabled(void) { unsigned long flags = __raw_local_save_flags(); return raw_irqs_disabled_flags(flags); } /* * For spinlocks, etc: */ static inline unsigned long __raw_local_irq_save(void) { unsigned long flags = __raw_local_save_flags(); raw_local_irq_disable(); return flags; } #define raw_local_irq_save(flags) \ do { (flags) = __raw_local_irq_save(); } while (0) #endif /* __ASSEMBLY__ */ /* * Do the CPU's IRQ-state tracing from assembly code. We call a * C function, so save all the C-clobbered registers: */ #ifdef CONFIG_TRACE_IRQFLAGS # define TRACE_IRQS_ON \ pushl %eax; \ pushl %ecx; \ pushl %edx; \ call trace_hardirqs_on; \ popl %edx; \ popl %ecx; \ popl %eax; # define TRACE_IRQS_OFF \ pushl %eax; \ pushl %ecx; \ pushl %edx; \ call trace_hardirqs_off; \ popl %edx; \ popl %ecx; \ popl %eax; #else # define TRACE_IRQS_ON # define TRACE_IRQS_OFF #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ist.h000066400000000000000000000015351314037446600235440ustar00rootroot00000000000000#ifndef _ASM_IST_H #define _ASM_IST_H /* * Include file for the interface to IST BIOS * Copyright 2002 Andy Grover * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #ifdef __KERNEL__ struct ist_info { unsigned long signature; unsigned long command; unsigned long event; unsigned long perf_level; }; extern struct ist_info ist_info; #endif /* __KERNEL__ */ #endif /* _ASM_IST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/k8.h000066400000000000000000000000331314037446600232570ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/kdebug.h000066400000000000000000000021371314037446600242050ustar00rootroot00000000000000#ifndef _I386_KDEBUG_H #define _I386_KDEBUG_H 1 /* * Aug-05 2004 Ported by Prasanna S Panchamukhi * from x86_64 architecture. 
*/ #include struct pt_regs; struct die_args { struct pt_regs *regs; const char *str; long err; int trapnr; int signr; }; extern int register_die_notifier(struct notifier_block *); extern int unregister_die_notifier(struct notifier_block *); extern int register_page_fault_notifier(struct notifier_block *); extern int unregister_page_fault_notifier(struct notifier_block *); extern struct atomic_notifier_head i386die_chain; /* Grossly misnamed. */ enum die_val { DIE_OOPS = 1, DIE_INT3, DIE_DEBUG, DIE_PANIC, DIE_NMI, DIE_DIE, DIE_NMIWATCHDOG, DIE_KERNELDEBUG, DIE_TRAP, DIE_GPF, DIE_CALL, DIE_NMI_IPI, DIE_PAGE_FAULT, }; static inline int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig }; return atomic_notifier_call_chain(&i386die_chain, val, &args); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/kexec.h000066400000000000000000000100241314037446600240350ustar00rootroot00000000000000#ifndef _I386_KEXEC_H #define _I386_KEXEC_H #define PA_CONTROL_PAGE 0 #define VA_CONTROL_PAGE 1 #define PA_PGD 2 #define VA_PGD 3 #define PA_PTE_0 4 #define VA_PTE_0 5 #define PA_PTE_1 6 #define VA_PTE_1 7 #ifdef CONFIG_X86_PAE #define PA_PMD_0 8 #define VA_PMD_0 9 #define PA_PMD_1 10 #define VA_PMD_1 11 #define PAGES_NR 12 #else #define PAGES_NR 8 #endif #ifndef __ASSEMBLY__ #include #include #include /* * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. * I.e. Maximum page that is mapped directly into kernel memory, * and kmap is not required. * * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct * calculation for the amount of memory directly mappable into the * kernel memory space. */ /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE #define KEXEC_CONTROL_CODE_SIZE 4096 /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_386 /* We can also handle crash dumps from 64 bit kernel. */ #define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) #define MAX_NOTE_BYTES 1024 /* CPU does not save ss and esp on stack if execution is already * running in kernel mode at the time of NMI occurrence. This code * fixes it. */ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, struct pt_regs *oldregs) { memcpy(newregs, oldregs, sizeof(*newregs)); newregs->esp = (unsigned long)&(oldregs->esp); __asm__ __volatile__( "xorl %%eax, %%eax\n\t" "movw %%ss, %%ax\n\t" :"=a"(newregs->xss)); } /* * This function is responsible for capturing register states if coming * via panic otherwise just fix up the ss and esp if coming via kernel * mode exception. 
*/ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { if (oldregs) crash_fixup_ss_esp(newregs, oldregs); else { __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx)); __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx)); __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx)); __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi)); __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi)); __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp)); __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax)); __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp)); __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss)); __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs)); __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds)); __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes)); __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags)); newregs->eip = (unsigned long)current_text_addr(); } } asmlinkage NORET_TYPE void relocate_kernel(unsigned long indirection_page, unsigned long control_page, unsigned long start_address, unsigned int has_pae) ATTRIB_NORET; /* Under Xen we need to work with machine addresses. These macros give the * machine address of a certain page to the generic kexec code instead of * the pseudo physical address which would be given by the default macros. */ #ifdef CONFIG_XEN #define KEXEC_ARCH_HAS_PAGE_MACROS #define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page)) #define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn)) #define kexec_virt_to_phys(addr) virt_to_machine(addr) #define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr)) #endif #endif /* __ASSEMBLY__ */ #endif /* _I386_KEXEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/kmap_types.h000066400000000000000000000006611314037446600251200ustar00rootroot00000000000000#ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H #ifdef CONFIG_DEBUG_HIGHMEM # define D(n) __KM_FENCE_##n , #else # define D(n) #endif enum km_type { D(0) KM_BOUNCE_READ, D(1) KM_SKB_SUNRPC_DATA, D(2) KM_SKB_DATA_SOFTIRQ, D(3) KM_USER0, D(4) KM_USER1, D(5) KM_BIO_SRC_IRQ, D(6) KM_BIO_DST_IRQ, D(7) KM_PTE0, D(8) KM_PTE1, D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, D(13) KM_TYPE_NR }; #undef D #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/kprobes.h000066400000000000000000000054771314037446600244230ustar00rootroot00000000000000#ifndef _ASM_KPROBES_H #define _ASM_KPROBES_H /* * Kernel Probes (KProbes) * include/asm-i386/kprobes.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S Kernel * Probes initial implementation ( includes suggestions from * Rusty Russell). 
*/ #include #include #define __ARCH_WANT_KPROBES_INSN_SLOT struct kprobe; struct pt_regs; typedef u8 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xcc #define RELATIVEJUMP_INSTRUCTION 0xe9 #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES #define ARCH_INACTIVE_KPROBE_COUNT 0 #define flush_insn_slot(p) do { } while (0) void arch_remove_kprobe(struct kprobe *p); void kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { /* copy of the original instruction */ kprobe_opcode_t *insn; /* * If this flag is not 0, this kprobe can be boost when its * post_handler and break_handler is not set. */ int boostable; }; struct prev_kprobe { struct kprobe *kp; unsigned long status; unsigned long old_eflags; unsigned long saved_eflags; }; /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_old_eflags; unsigned long kprobe_saved_eflags; long *jprobe_saved_esp; struct pt_regs jprobe_saved_regs; kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; /* trap3/1 are intr gates for kprobes. So, restore the status of IF, * if necessary, before executing the original int3/1 (trap) handler. */ static inline void restore_interrupts(struct pt_regs *regs) { if (regs->eflags & IF_MASK) local_irq_enable(); } extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); #endif /* _ASM_KPROBES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ldt.h000066400000000000000000000013001314037446600235160ustar00rootroot00000000000000/* * ldt.h * * Definitions of structures used with the modify_ldt system call. */ #ifndef _LINUX_LDT_H #define _LINUX_LDT_H /* Maximum number of LDT entries supported. */ #define LDT_ENTRIES 8192 /* The size of each LDT entry. 
*/ #define LDT_ENTRY_SIZE 8 #ifndef __ASSEMBLY__ struct user_desc { unsigned int entry_number; unsigned long base_addr; unsigned int limit; unsigned int seg_32bit:1; unsigned int contents:2; unsigned int read_exec_only:1; unsigned int limit_in_pages:1; unsigned int seg_not_present:1; unsigned int useable:1; }; #define MODIFY_LDT_CONTENTS_DATA 0 #define MODIFY_LDT_CONTENTS_STACK 1 #define MODIFY_LDT_CONTENTS_CODE 2 #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/linkage.h000066400000000000000000000006071314037446600243560ustar00rootroot00000000000000#ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) #define FASTCALL(x) x __attribute__((regparm(3))) #define fastcall __attribute__((regparm(3))) #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) #ifdef CONFIG_X86_ALIGNMENT_16 #define __ALIGN .align 16,0x90 #define __ALIGN_STR ".align 16,0x90" #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/local.h000066400000000000000000000042311314037446600240330ustar00rootroot00000000000000#ifndef _ARCH_I386_LOCAL_H #define _ARCH_I386_LOCAL_H #include typedef struct { volatile long counter; } local_t; #define LOCAL_INIT(i) { (i) } #define local_read(v) ((v)->counter) #define local_set(v,i) (((v)->counter) = (i)) static __inline__ void local_inc(local_t *v) { __asm__ __volatile__( "incl %0" :"+m" (v->counter)); } static __inline__ void local_dec(local_t *v) { __asm__ __volatile__( "decl %0" :"+m" (v->counter)); } static __inline__ void local_add(long i, local_t *v) { __asm__ __volatile__( "addl %1,%0" :"+m" (v->counter) :"ir" (i)); } static __inline__ void local_sub(long i, local_t *v) { __asm__ __volatile__( "subl %1,%0" :"+m" (v->counter) :"ir" (i)); } /* On x86, these are no better than the atomic variants. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i,l) local_add((i),(l)) #define __local_sub(i,l) local_sub((i),(l)) /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. */ /* Need to disable preemption for the cpu local counters otherwise we could still access a variable of a previous CPU in a non atomic way. 
*/ #define cpu_local_wrap_v(v) \ ({ local_t res__; \ preempt_disable(); \ res__ = (v); \ preempt_enable(); \ res__; }) #define cpu_local_wrap(v) \ ({ preempt_disable(); \ v; \ preempt_enable(); }) \ #define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v))) #define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i))) #define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v))) #define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v))) #define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v))) #define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v))) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) #define __cpu_local_add(i, v) cpu_local_add((i), (v)) #define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) #endif /* _ARCH_I386_LOCAL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/000077500000000000000000000000001314037446600242705ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/000077500000000000000000000000001314037446600250505ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/agp.h000066400000000000000000000033271314037446600257750ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include #include #include /* * Functions to keep the agpgart mappings coherent with the MMU. * The GART gives the CPU a physical alias of pages in memory. The alias region is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. This avoids * data corruption on some CPUs. */ /* Caller's responsibility to call global_flush_tlb() for * performance reasons */ #define map_page_into_agp(page) ( \ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) #define unmap_page_from_agp(page) ( \ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \ change_page_attr(page, 1, PAGE_KERNEL)) #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() wbinvd() /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) phys_to_machine(x) #define gart_to_phys(x) machine_to_phys(x) /* GATT allocation. Returns/accepts GATT kernel virtual address. 
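*/

/*
 * Illustrative sketch only (not part of the original agp.h): how a GART
 * driver might use the helpers above for a single page.  Assumes
 * global_flush_tlb() (asm/cacheflush.h, included above) is visible; "page"
 * and the example_* names are hypothetical, error handling is minimal.
 */
static inline int example_agp_map_one(struct page *page)
{
	int rc = map_page_into_agp(page);	/* uncached, machine-addressable mapping */

	if (rc)
		return rc;
	global_flush_tlb();			/* caller's responsibility, see comment above */
	return 0;
}

static inline void example_agp_unmap_one(struct page *page)
{
	unmap_page_from_agp(page);		/* back to a normal cached kernel mapping */
	global_flush_tlb();
}

/*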
*/ #define alloc_gatt_pages(order) ({ \ char *_t; dma_addr_t _d; \ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \ _t; }) #define free_gatt_pages(table, order) \ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/desc.h000066400000000000000000000104351314037446600261420ustar00rootroot00000000000000#ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #define CPU_16BIT_STACK_SIZE 1024 #ifndef __ASSEMBLY__ #include #include #include extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); struct Xgt_desc_struct { unsigned short size; unsigned long address __attribute__((packed)); unsigned short pad; } __attribute__ ((packed)); extern struct Xgt_desc_struct idt_descr; DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr)) #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt)) #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr)) #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr)) #define store_tr(tr) __asm__ ("str %0":"=mr" (tr)) #define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt)) /* * This is the ldt that every process will get unless we need * something other than this. */ extern struct desc_struct default_ldt[]; extern void set_intr_gate(unsigned int irq, void * addr); #define _set_tssldt_desc(n,addr,limit,type) \ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ "movw %w1,2(%2)\n\t" \ "rorl $16,%1\n\t" \ "movb %b1,4(%2)\n\t" \ "movb %4,5(%2)\n\t" \ "movb $0,6(%2)\n\t" \ "movb %h1,7(%2)\n\t" \ "rorl $16,%1" \ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type)) #ifndef CONFIG_X86_NO_TSS static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr, offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); } #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) #endif static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); } #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 ) extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b); #if TLS_SIZE != 24 # error update this code. 
#endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { #define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \ *(u64 *)&t->tls_array[i])) \ BUG(); C(0); C(1); C(2); #undef C } static inline void clear_LDT(void) { int cpu = get_cpu(); /* * NB. We load the default_ldt for lcall7/27 handling on demand, as * it slows down context switching. Noone uses it anyway. */ cpu = cpu; /* XXX avoid compiler warning */ xen_set_ldt(NULL, 0); put_cpu(); } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock(mm_context_t *pc, int cpu) { void *segments = pc->ldt; int count = pc->size; if (likely(!count)) segments = NULL; xen_set_ldt(segments, count); } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } static inline unsigned long get_desc_base(unsigned long *desc) { unsigned long base; base = ((desc[0] >> 16) & 0x0000ffff) | ((desc[1] << 16) & 0x00ff0000) | (desc[1] & 0xff000000); return base; } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/dma-mapping.h000066400000000000000000000101321314037446600274100ustar00rootroot00000000000000#ifndef _ASM_I386_DMA_MAPPING_H #define _ASM_I386_DMA_MAPPING_H /* * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for * documentation. */ #include #include #include #include #include static inline int address_needs_mapping(struct device *hwdev, dma_addr_t addr) { dma_addr_t mask = 0xffffffff; /* If the device has a mask, use it, otherwise default to 32 bits */ if (hwdev && hwdev->dma_mask) mask = *hwdev->dma_mask; return (addr & ~mask) != 0; } extern int range_straddles_page_boundary(paddr_t p, size_t size); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction direction); extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction); extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction); extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction); #ifdef CONFIG_HIGHMEM extern dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction); #else #define dma_map_page(dev, page, offset, size, dir) \ dma_map_single(dev, page_address(page) + (offset), (size), (dir)) #define dma_unmap_page dma_unmap_single #endif extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); extern void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); } static 
inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { dma_sync_single_for_device(dev, dma_handle+offset, size, direction); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { if (swiotlb) swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction); flush_write_buffers(); } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { if (swiotlb) swiotlb_sync_sg_for_device(dev,sg,nelems,direction); flush_write_buffers(); } extern int dma_mapping_error(dma_addr_t dma_addr); extern int dma_supported(struct device *dev, u64 mask); static inline int dma_set_mask(struct device *dev, u64 mask) { if(!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } static inline int dma_get_cache_alignment(void) { /* no easy way to get cache size on all x86, so return the * maximum possible, to be safe */ return (1 << INTERNODE_CACHE_SHIFT); } #define dma_is_consistent(d) (1) static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags); extern void dma_release_declared_memory(struct device *dev); extern void * dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/fixmap.h000066400000000000000000000110501314037446600265020ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H /* used by vmalloc.c, vsyscall.lds.S. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. */ extern unsigned long __FIXADDR_TOP; #ifndef __ASSEMBLY__ #include #include #include #include #ifdef CONFIG_HIGHMEM #include #include #endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. We allocate these special addresses * from the end of virtual memory (0xfffff000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. 
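*/

/*
 * Illustrative sketch only (not part of the original dma-mapping.h): the
 * usual streaming-DMA pattern with the interfaces declared above.  "dev",
 * "buf" and "len" are hypothetical caller-supplied values; a real driver
 * would only touch "buf" again after dma_unmap_single() and would perform
 * the actual device I/O in between.
 */
static inline int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;

	/* ... program the device with "handle" and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

/*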
*/ enum fixed_addresses { FIX_HOLE, FIX_VDSO, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif #ifdef CONFIG_X86_F00F_BUG FIX_F00F_IDT, /* Virtual mapping for IDT */ #endif #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ #endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif #ifdef CONFIG_ACPI FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, #endif #ifdef CONFIG_PCI_MMCONFIG FIX_PCIE_MCFG, #endif FIX_SHARED_INFO, #define NR_FIX_ISAMAPS 256 FIX_ISAMAP_END, FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1, __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, FIX_WP_TEST, __end_of_fixed_addresses }; extern void set_fixaddr_top(unsigned long top); extern void __set_fixmap(enum fixed_addresses idx, maddr_t phys, pgprot_t flags); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without tranlation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. */ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) { BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/gnttab_dma.h000066400000000000000000000026471314037446600273320ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/highmem.h000066400000000000000000000050561314037446600266450ustar00rootroot00000000000000/* * highmem.h: virtual kernel memory mappings for high memory * * Used in CONFIG_HIGHMEM systems for memory pages which * are not addressable by direct kernel virtual addresses. * * Copyright (C) 1999 Gerhard Wichert, Siemens AG * Gerhard.Wichert@pdb.siemens.de * * * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * * Copyright (C) 1999 Ingo Molnar */ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H #ifdef __KERNEL__ #include #include #include #include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; extern pte_t *kmap_pte; extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; /* * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
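*/

/*
 * Illustrative sketch only (not part of the original highmem.h): zeroing a
 * (possibly highmem) page through a temporary kernel mapping with the
 * kmap()/kunmap() interface declared further down in this header.  kmap()
 * may sleep, so this must not be called from atomic context (use
 * kmap_atomic() with a km_type slot there).  "page" is hypothetical,
 * memset()/PAGE_SIZE are assumed visible, and clear_highpage() below does
 * essentially the same thing.
 */
static inline void example_zero_page(struct page *page)
{
	void *vaddr = kmap(page);	/* persistent kernel mapping, may sleep */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}

/*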
*/ #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else #define LAST_PKMAP 1024 #endif /* * Ordering is: * * FIXADDR_TOP * fixed_addresses * FIXADDR_START * temp fixed addresses * FIXADDR_BOOT_START * Persistent kmap area * PKMAP_BASE * VMALLOC_END * Vmalloc area * VMALLOC_START * high_memory */ #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) extern void * FASTCALL(kmap_high(struct page *page)); extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void *kmap_atomic_pte(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) void clear_highpage(struct page *); static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { clear_highpage(page); } #define __HAVE_ARCH_CLEAR_HIGHPAGE #define __HAVE_ARCH_CLEAR_USER_HIGHPAGE void copy_highpage(struct page *to, struct page *from); static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) { copy_highpage(to, from); } #define __HAVE_ARCH_COPY_HIGHPAGE #define __HAVE_ARCH_COPY_USER_HIGHPAGE #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/hw_irq.h000066400000000000000000000034511314037446600265150ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old arch/i386/kernel/irq.h to here. 
VY * * IRQ/IPI changes taken from work by Thomas Radke * */ #include #include #include #include struct hw_interrupt_type; #define NMI_VECTOR 0x02 /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 extern void (*interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP fastcall void reschedule_interrupt(void); fastcall void invalidate_interrupt(void); fastcall void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC fastcall void apic_timer_interrupt(void); fastcall void error_interrupt(void); fastcall void spurious_interrupt(void); fastcall void thermal_interrupt(struct pt_regs *); #define platform_legacy_irq(irq) ((irq) < 16) #endif void disable_8259A_irq(unsigned int irq); void enable_8259A_irq(unsigned int irq); int i8259A_irq_pending(unsigned int irq); void make_8259A_irq(unsigned int irq); void init_8259A(int aeoi); void FASTCALL(send_IPI_self(int vector)); void init_VISWS_APIC_irqs(void); void setup_IO_APIC(void); void disable_IO_APIC(void); #define print_IO_APIC() int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); void send_IPI(int dest, int vector); void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #endif /* _ASM_HW_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/hypercall.h000066400000000000000000000234761314037446600272200ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check 
HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { return _hypercall2(int, memory_op, cmd, arg); } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = 
_hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/hypervisor.h000066400000000000000000000160101314037446600274310ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) # ifdef CONFIG_X86_PAE # include # else # include # endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) # include #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. 
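*/

/*
 * Illustrative sketch only (not part of the original headers): calling one of
 * the hypercall wrappers defined above.  With a NULL argument,
 * XENVER_version (xen/interface/version.h, assumed in scope) makes Xen return
 * its version packed as (major << 16) | minor.  example_xen_major_version is
 * a hypothetical helper.
 */
static inline int example_xen_major_version(void)
{
	int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

	return ver >> 16;	/* major version; the minor is in the low 16 bits */
}

/*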
*/ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. 
*/ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/io.h000066400000000000000000000266121314037446600256370ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). * * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. */ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define IO_SPACE_LIMIT 0xffff #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ #ifdef __KERNEL__ #include #include #include /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p /** * virt_to_phys - map virtual addresses to physical * @address: address to remap * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. 
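*/

/*
 * Illustrative sketch only (not part of the original headers): batching two
 * PTE updates through the multicall helpers defined earlier (hypervisor.h),
 * so that only a single hypercall is issued.  Assumes multicall_entry_t,
 * pte_t and UVMF_INVLPG (xen/interface/xen.h) are in scope here; the va/pte
 * parameters and the helper name are hypothetical.
 */
static inline void example_batched_va_update(unsigned long va0, pte_t pte0,
					     unsigned long va1, pte_t pte1)
{
	multicall_entry_t mcl[2];

	MULTI_update_va_mapping(&mcl[0], va0, pte0, 0);
	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
	if (unlikely(HYPERVISOR_multicall(mcl, 2)))
		BUG();
}

/*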
In * almost all conceivable cases a device driver should not be using * this function */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } /** * phys_to_virt - map physical address to virtual * @address: address to remap * * The returned virtual address is a current CPU mapping for * the memory address given. It is only valid to use this function on * addresses that have a kernel mapping * * This function does not handle bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline void * phys_to_virt(unsigned long address) { return __va(address); } /* * Change "struct page" to physical address. */ #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) #define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page))) #define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page))) #define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \ (unsigned long) bio_offset((bio))) #define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \ (unsigned long) (bv)->bv_offset) #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \ bvec_to_pseudophys((vec2)))) extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * ioremap performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. */ static inline void __iomem * ioremap(unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* * bt_ioremap() and bt_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. * A boot-time mapping is currently limited to at most 16 pages. */ extern void *bt_ioremap(unsigned long offset, unsigned long size); extern void bt_iounmap(void *addr, unsigned long size); /* Use early IO mappings for DMI because it's initialized early */ #define dmi_ioremap bt_ioremap #define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); }) #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x #define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x)) /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus(_x) phys_to_machine(__pa(_x)) #define bus_to_virt(_x) __va(machine_to_phys(_x)) /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. 
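*/

/*
 * Illustrative sketch only (not part of the original io.h): under Xen,
 * virt_to_phys() (defined above) yields a guest pseudo-physical address,
 * while a device needs the machine address that virt_to_bus() (also defined
 * above) produces via phys_to_machine().  Drivers should normally use the
 * DMA API rather than virt_to_bus(); "buf" and the helper name are
 * hypothetical, and maddr_t is assumed visible from asm/maddr.h.
 */
static inline void example_pseudo_vs_machine(void *buf)
{
	unsigned long pseudo = virt_to_phys(buf);	/* guest (pseudo-)physical */
	maddr_t mach = virt_to_bus(buf);		/* real machine address */

	(void)pseudo;
	(void)mach;
}

/*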
*/ static inline unsigned char readb(const volatile void __iomem *addr) { return *(volatile unsigned char __force *) addr; } static inline unsigned short readw(const volatile void __iomem *addr) { return *(volatile unsigned short __force *) addr; } static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl static inline void writeb(unsigned char b, volatile void __iomem *addr) { *(volatile unsigned char __force *) addr = b; } static inline void writew(unsigned short b, volatile void __iomem *addr) { *(volatile unsigned short __force *) addr = b; } static inline void writel(unsigned int b, volatile void __iomem *addr) { *(volatile unsigned int __force *) addr = b; } #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define mmiowb() static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { memset((void __force *) addr, val, count); } static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) { __memcpy(dst, (void __force *) src, count); } static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) { __memcpy((void __force *) dst, src, count); } /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN))) /* * Again, i386 does not require mem IO specific function. */ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(volatile void __iomem * io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* * Cache management * * This needed for two cases * 1. Out of order aware processors * 2. 
Accidentally out of order processors (PPro errata #51) */ #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); } #define dma_cache_inv(_start,_size) flush_write_buffers() #define dma_cache_wback(_start,_size) flush_write_buffers() #define dma_cache_wback_inv(_start,_size) flush_write_buffers() #else /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() #endif #endif /* __KERNEL__ */ #ifdef SLOW_IO_BY_JUMPING #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" #else #define __SLOW_DOWN_IO "outb %%al,$0x80;" #endif static inline void slow_down_io(void) { __asm__ __volatile__( __SLOW_DOWN_IO #ifdef REALLY_SLOW_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO #endif : : ); } #ifdef CONFIG_X86_NUMAQ extern void *xquad_portio; /* Where the IO area was mapped */ #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ if (xquad_portio) \ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ else \ out##bwl##_local(value, port); \ } \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_quad(value, port, 0); \ } \ static inline unsigned type in##bwl##_quad(int port, int quad) { \ if (xquad_portio) \ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ else \ return in##bwl##_local(port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_quad(port, 0); \ } #else #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_local(value, port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_local(port); \ } #endif #define BUILDIO(bwl,bw,type) \ static inline void out##bwl##_local(unsigned type value, int port) { \ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ } \ static inline unsigned type in##bwl##_local(int port) { \ unsigned type value; \ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ return value; \ } \ static inline void out##bwl##_local_p(unsigned type value, int port) { \ out##bwl##_local(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_local_p(int port) { \ unsigned type value = in##bwl##_local(port); \ slow_down_io(); \ return value; \ } \ __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_p(unsigned type value, int port) { \ out##bwl(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_p(int port) { \ unsigned type value = in##bwl(port); \ slow_down_io(); \ return value; \ } \ static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ } \ static inline void ins##bwl(int port, void *addr, unsigned long count) { \ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ } BUILDIO(b,b,char) BUILDIO(w,w,short) BUILDIO(l,,int) /* We will be supplying our own /dev/mem implementation */ #define ARCH_HAS_DEV_MEM #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/irqflags.h000066400000000000000000000056411314037446600270370ustar00rootroot00000000000000/* * include/asm-i386/irqflags.h * * IRQ flags handling * * 
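*/

/*
 * Illustrative sketch only (not part of the original headers): mapping a
 * hypothetical MMIO BAR with ioremap() and touching registers through the
 * readl()/writel() accessors defined in io.h above.  The register offsets
 * and the helper name are made up for illustration.
 */
static inline unsigned int example_mmio_read_status(unsigned long bar_phys,
						    unsigned long len)
{
	void __iomem *regs = ioremap(bar_phys, len);
	unsigned int status = 0;

	if (regs) {
		writel(0x1, regs);		/* hypothetical "start" register at 0x00 */
		status = readl(regs + 0x10);	/* hypothetical status register at 0x10 */
		iounmap(regs);
	}
	return status;
}

/*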
This file gets included from lowlevel asm headers too, to provide * wrapped versions of the local_irq_*() APIs, based on the * raw_local_irq_*() functions from the lowlevel headers. */ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H #ifndef __ASSEMBLY__ /* * The use of 'barrier' in the following reflects their use as local-lock * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following * critical operations are executed. All critical operations must complete * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also * includes these barriers, for example. */ #define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask) #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) #define raw_local_irq_restore(x) \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ _vcpu = current_vcpu_info(); \ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \ barrier(); /* unmask then check (avoid races) */ \ if (unlikely(_vcpu->evtchn_upcall_pending)) \ force_evtchn_callback(); \ } \ } while (0) #define raw_local_irq_disable() \ do { \ current_vcpu_info()->evtchn_upcall_mask = 1; \ barrier(); \ } while (0) #define raw_local_irq_enable() \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ _vcpu = current_vcpu_info(); \ _vcpu->evtchn_upcall_mask = 0; \ barrier(); /* unmask then check (avoid races) */ \ if (unlikely(_vcpu->evtchn_upcall_pending)) \ force_evtchn_callback(); \ } while (0) /* * Used in the idle loop; sti takes one instruction cycle * to complete: */ void raw_safe_halt(void); /* * Used when interrupts are already enabled or to * shutdown the processor: */ void halt(void); static inline int raw_irqs_disabled_flags(unsigned long flags) { return (flags != 0); } #define raw_irqs_disabled() \ ({ \ unsigned long flags = __raw_local_save_flags(); \ \ raw_irqs_disabled_flags(flags); \ }) /* * For spinlocks, etc: */ #define __raw_local_irq_save() \ ({ \ unsigned long flags = __raw_local_save_flags(); \ \ raw_local_irq_disable(); \ \ flags; \ }) #define raw_local_irq_save(flags) \ do { (flags) = __raw_local_irq_save(); } while (0) #endif /* __ASSEMBLY__ */ /* * Do the CPU's IRQ-state tracing from assembly code. We call a * C function, so save all the C-clobbered registers: */ #ifdef CONFIG_TRACE_IRQFLAGS # define TRACE_IRQS_ON \ pushl %eax; \ pushl %ecx; \ pushl %edx; \ call trace_hardirqs_on; \ popl %edx; \ popl %ecx; \ popl %eax; # define TRACE_IRQS_OFF \ pushl %eax; \ pushl %ecx; \ pushl %edx; \ call trace_hardirqs_off; \ popl %edx; \ popl %ecx; \ popl %eax; #else # define TRACE_IRQS_ON # define TRACE_IRQS_OFF #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/maddr.h000066400000000000000000000133331314037446600263130ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
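*/

/*
 * Illustrative sketch only (not part of the original headers): the classic
 * save/disable/restore pattern on top of the paravirtualized flag handling
 * in irqflags.h above.  Under Xen this toggles evtchn_upcall_mask instead of
 * executing cli/sti.  "example_critical_op" is a hypothetical caller-supplied
 * operation.
 */
static inline void example_irqsave_section(void (*example_critical_op)(void))
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* mask event delivery to this vcpu */
	example_critical_op();		/* critical section */
	raw_local_irq_restore(flags);	/* unmask; pending events get delivered */
}

/*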
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(max_mapnr && pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(max_mapnr && pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < max_mapnr) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(max_mapnr && pfn >= max_mapnr); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). 
On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #endif #ifdef CONFIG_X86_PAE #define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } ) static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } #else #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/mmu.h000066400000000000000000000012161314037446600260170ustar00rootroot00000000000000#ifndef __i386_MMU_H #define __i386_MMU_H #include /* * The i386 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { int size; struct semaphore sem; void *ldt; void *vdso; #ifdef CONFIG_XEN int has_foreign_mappings; #endif } mm_context_t; /* mm/memory.c:exit_mmap hook */ extern void _arch_exit_mmap(struct mm_struct *mm); #define arch_exit_mmap(_mm) _arch_exit_mmap(_mm) /* kernel/fork.c:dup_mmap hook */ extern void _arch_dup_mmap(struct mm_struct *mm); #define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/mmu_context.h000066400000000000000000000055661314037446600275770ustar00rootroot00000000000000#ifndef __I386_SCHED_H #define __I386_SCHED_H #include #include #include #include /* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #if 0 /* XEN: no lazy tlb */ unsigned cpu = smp_processor_id(); if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; #endif } #define prepare_arch_switch(next) __prepare_arch_switch() static inline void __prepare_arch_switch(void) { /* * Save away %fs and %gs. No need to save %es and %ds, as those * are always kernel segments while inside the kernel. Must * happen before reload of cr3/ldt (i.e., not in __switch_to). 
*/ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1" : "=m" (current->thread.fs), "=m" (current->thread.gs)); asm volatile ( "movl %0,%%fs ; movl %0,%%gs" : : "r" (0) ); } extern void mm_pin(struct mm_struct *mm); extern void mm_unpin(struct mm_struct *mm); void mm_pin_all(void); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { int cpu = smp_processor_id(); struct mmuext_op _op[2], *op = _op; if (likely(prev != next)) { BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags)); /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #if 0 /* XEN: no lazy tlb */ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; per_cpu(cpu_tlbstate, cpu).active_mm = next; #endif cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables: load_cr3(next->pgd) */ op->cmd = MMUEXT_NEW_BASEPTR; op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT); op++; /* * load the LDT, if the LDT is different: */ if (unlikely(prev->context.ldt != next->context.ldt)) { /* load_LDT_nolock(&next->context, cpu) */ op->cmd = MMUEXT_SET_LDT; op->arg1.linear_addr = (unsigned long)next->context.ldt; op->arg2.nr_ents = next->context.size; op++; } BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF)); } #if 0 /* XEN: no lazy tlb */ else { per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk, mm) \ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags)) mm_pin(next); switch_mm(prev, next, NULL); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/page.h000066400000000000000000000157731314037446600261520ustar00rootroot00000000000000#ifndef _I386_PAGE_H #define _I386_PAGE_H /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #ifdef CONFIG_X86_PAE #define __PHYSICAL_MASK_SHIFT 40 #define __PHYSICAL_MASK ((1ULL << __PHYSICAL_MASK_SHIFT) - 1) #define PHYSICAL_PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK) #else #define __PHYSICAL_MASK_SHIFT 32 #define __PHYSICAL_MASK (~0UL) #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) #endif #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #ifdef __KERNEL__ /* * Need to repeat this here in order to not include pgtable.h (which in turn * depends on definitions made here), but to be able to use the symbolic * below. The preprocessor will warn if the two definitions aren't identical. */ #define _PAGE_PRESENT 0x001 #define _PAGE_IO 0x200 #ifndef __ASSEMBLY__ #include #include #include #include #include #include #ifdef CONFIG_X86_USE_3DNOW #include #define clear_page(page) mmx_clear_page((void *)(page)) #define copy_page(to,from) mmx_copy_page(to,from) #else #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * On older X86 processors it's not a win to use MMX here it seems. * Maybe the K6-III ? 
*/ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) #endif #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) /* * These are used to make use of C type-checking.. */ extern int nx_enabled; #ifdef CONFIG_X86_PAE extern unsigned long long __supported_pte_mask; typedef struct { unsigned long pte_low, pte_high; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgprot; } pgprot_t; #define pgprot_val(x) ((x).pgprot) #include #define __pte(x) ({ unsigned long long _x = (x); \ if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT) \ _x = pte_phys_to_machine(_x); \ ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); }) #define __pgd(x) ({ unsigned long long _x = (x); \ (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; }) #define __pmd(x) ({ unsigned long long _x = (x); \ (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; }) static inline unsigned long long __pte_val(pte_t x) { return ((unsigned long long)x.pte_high << 32) | x.pte_low; } static inline unsigned long long pte_val(pte_t x) { unsigned long long ret = __pte_val(x); if ((x.pte_low & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define __pmd_val(x) ((x).pmd) static inline unsigned long long pmd_val(pmd_t x) { unsigned long long ret = __pmd_val(x); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT; #else if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); #endif return ret; } #define __pud_val(x) __pgd_val((x).pgd) #define __pgd_val(x) ((x).pgd) static inline unsigned long long pgd_val(pgd_t x) { unsigned long long ret = __pgd_val(x); if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define HPAGE_SHIFT 21 #else typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pgprot_val(x) ((x).pgprot) #include #define boot_pte_t pte_t /* or would you rather have a typedef */ #define __pte_val(x) ((x).pte_low) #define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO)) \ == _PAGE_PRESENT ? \ machine_to_phys(__pte_val(x)) : \ __pte_val(x)) #define __pte(x) ({ unsigned long _x = (x); \ if ((_x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT) \ _x = phys_to_machine(_x); \ ((pte_t) { _x }); }) #define __pmd_val(x) __pud_val((x).pud) #define __pud_val(x) __pgd_val((x).pgd) #define __pgd(x) ({ unsigned long _x = (x); \ (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; }) #define __pgd_val(x) ((x).pgd) static inline unsigned long pgd_val(pgd_t x) { unsigned long ret = __pgd_val(x); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT; #else if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret); #endif return ret; } #define HPAGE_SHIFT 22 #endif #define PTE_MASK PHYSICAL_PAGE_MASK #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif #define __pgprot(x) ((pgprot_t) { (x) } ) #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* * This handles the memory map.. 
We could make this a config * option, but too many people screw it up, and too few need * it. * * A __PAGE_OFFSET of 0xC0000000 means that the kernel has * a virtual address space of one gigabyte, which limits the * amount of physical memory you can use to about 950MB. * * If you want more physical memory than this then see the CONFIG_HIGHMEM4G * and CONFIG_HIGHMEM64G options in the kernel configuration. */ #ifndef __ASSEMBLY__ struct vm_area_struct; /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. */ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern int page_is_ram(unsigned long pagenr); #endif /* __ASSEMBLY__ */ #ifdef __ASSEMBLY__ #define __PAGE_OFFSET CONFIG_PAGE_OFFSET #define __PHYSICAL_START CONFIG_PHYSICAL_START #else #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #endif #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) #if CONFIG_XEN_COMPAT <= 0x030002 #undef LOAD_OFFSET #define LOAD_OFFSET 0 #endif #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) #define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (VM_READ | VM_WRITE | \ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include #include #define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #endif /* _I386_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pci.h000066400000000000000000000075131314037446600260020ustar00rootroot00000000000000#ifndef __i386_PCI_H #define __i386_PCI_H #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #include #define pcibios_scan_all_fns(a, b) (!is_initial_xendomain()) extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. */ #include #include #include #include #include struct pci_dev; #ifdef CONFIG_SWIOTLB /* On Xen we use SWIOTLB instead of blk-specific bounce buffers. 
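 *
 * Illustrative aside, not part of the original header: a driver typically
 * embeds the unmap bookkeeping for a DMA mapping in its own structure with
 * the helpers defined just below (struct example_rx_buf is hypothetical):
 *
 *	struct example_rx_buf {
 *		void *cpu_addr;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(buf, mapping, dma_handle);
 *	pci_unmap_len_set(buf, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(buf, mapping),
 *			 pci_unmap_len(buf, len), PCI_DMA_FROMDEVICE);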
*/ #define PCI_DMA_BUS_IS_PHYS (0) #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ __u32 LEN_NAME; #define pci_unmap_addr(PTR, ADDR_NAME) \ ((PTR)->ADDR_NAME) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ (((PTR)->ADDR_NAME) = (VAL)) #define pci_unmap_len(PTR, LEN_NAME) \ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) #else /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. */ #define PCI_DMA_BUS_IS_PHYS (1) /* pci_unmap_{page,single} is a nop so... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif /* This is always fine. */ #define pci_dac_dma_supported(pci_dev, mask) (1) static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return pfn_to_page(dma_addr >> PAGE_SHIFT); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #endif /* __KERNEL__ */ #ifdef CONFIG_XEN_PCIDEV_FRONTEND #include #endif /* CONFIG_XEN_PCIDEV_FRONTEND */ /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include /* generic pci stuff */ #include #endif /* __i386_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgalloc.h000066400000000000000000000033441314037446600266460ustar00rootroot00000000000000#ifndef _I386_PGALLOC_H #define _I386_PGALLOC_H #include #include #include /* for struct page */ #include /* for phys_to_virt and page_to_pseudophys */ #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) #define pmd_populate(mm, pmd, pte) \ do { \ unsigned long pfn = page_to_pfn(pte); \ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \ if (!PageHighMem(pte)) \ BUG_ON(HYPERVISOR_update_va_mapping( \ (unsigned long)__va(pfn << PAGE_SHIFT), \ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \ kmap_flush_unused(); \ set_pmd(pmd, \ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \ } else \ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \ } while (0) /* * Allocate and free page tables. 
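 *
 * Illustrative aside, not part of the original header: generic mm code
 * allocates a pte page and wires it into the pmd through the hooks declared
 * below, roughly
 *
 *	struct page *new = pte_alloc_one(mm, address);
 *	if (new)
 *		pmd_populate(mm, pmd, new);
 *
 * On Xen, pmd_populate() (defined above) also re-maps the pte page
 * read-only when the page table is pinned, so the hypervisor can validate
 * every update made to it.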
*/ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); static inline void pte_free_kernel(pte_t *pte) { make_lowmem_page_writable(pte, XENFEAT_writable_page_tables); free_page((unsigned long)pte); } extern void pte_free(struct page *pte); #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. */ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(x) do { } while (0) #define __pmd_free_tlb(tlb,x) do { } while (0) #define pud_populate(mm, pmd, pte) BUG() #endif #define check_pgt_cache() do { } while (0) #endif /* _I386_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h000066400000000000000000000005641314037446600307520ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_DEFS_H #define _I386_PGTABLE_2LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 0 /* * traditional i386 two-level paging structure: */ #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 /* * the i386 is two-level, so we don't really have any * PMD directory physically. */ #define PTRS_PER_PTE 1024 #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgtable-2level.h000066400000000000000000000070621314037446600300330ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_H #define _I386_PGTABLE_2LEVEL_H #include #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \ __pte_val(e), pte_pfn(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \ __pgd_val(e), pgd_val(e) >> PAGE_SHIFT) /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. 
*/ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) #define set_pte_at(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \ set_pte((ptep), (pteval)); \ } while (0) #define set_pte_at_sync(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \ set_pte((ptep), (pteval)); \ xen_invlpg((addr)); \ } \ } while (0) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pte_none(x) (!(x).pte_low) static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (!pte_none(pte)) { if ((mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) pte = __pte_ma(xchg(&ptep->pte_low, 0)); } return pte; } #define ptep_clear_flush(vma, addr, ptep) \ ({ \ pte_t *__ptep = (ptep); \ pte_t __res = *__ptep; \ if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI))) { \ __ptep->pte_low = 0; \ flush_tlb_page(vma, addr); \ } \ __res; \ }) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT) #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte))) #define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \ (_pte).pte_low & _PAGE_PRESENT ? \ mfn_to_local_pfn(__pte_mfn(_pte)) : \ __pte_mfn(_pte)) #define pte_page(_pte) pfn_to_page(pte_pfn(_pte)) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) /* * All present user pages are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte); } /* * All present pages are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return 1; } /* * Bits 0, 6 and 7 are taken, split up the 29 bits of offset * into this range: */ #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) \ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) #define pgoff_to_pte(off) \ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) void vmalloc_sync_all(void); #endif /* _I386_PGTABLE_2LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h000066400000000000000000000007331314037446600307510ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_DEFS_H #define _I386_PGTABLE_3LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 0 /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ 
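The two *-defs.h headers above only fix the geometry of the page tables: how
many entries each level holds and where each index sits inside a 32-bit
virtual address.  As an illustrative aside that is not part of the archived
tree, the small user-space sketch below shows how the three-level (PAE)
constants split an address; the EX_* macros and example_* names are
hypothetical stand-ins for the kernel definitions above.

#include <stdio.h>

/* Mirrors PGDIR_SHIFT/PMD_SHIFT and the i386 PAGE_SHIFT defined above. */
#define EX_PGDIR_SHIFT	30
#define EX_PMD_SHIFT	21
#define EX_PAGE_SHIFT	12

static void example_split(unsigned long va)
{
	unsigned int pgd_idx = (va >> EX_PGDIR_SHIFT) & (4 - 1);   /* PTRS_PER_PGD == 4   */
	unsigned int pmd_idx = (va >> EX_PMD_SHIFT)   & (512 - 1); /* PTRS_PER_PMD == 512 */
	unsigned int pte_idx = (va >> EX_PAGE_SHIFT)  & (512 - 1); /* PTRS_PER_PTE == 512 */
	unsigned int offset  = va & ((1UL << EX_PAGE_SHIFT) - 1);

	printf("va 0x%08lx -> pgd %u, pmd %u, pte %u, offset 0x%x\n",
	       va, pgd_idx, pmd_idx, pte_idx, offset);
}

int main(void)
{
	example_split(0xc0000000UL);	/* the usual PAGE_OFFSET */
	example_split(0xbfffe123UL);	/* just below it, in user space */
	return 0;
}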
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgtable-3level.h000066400000000000000000000137631314037446600300410ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_H #define _I386_PGTABLE_3LEVEL_H #include /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. * * Copyright (C) 1999 Ingo Molnar */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \ &(e), __pte_val(e), pte_pfn(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT) #define pud_none(pud) 0 #define pud_bad(pud) 0 #define pud_present(pud) 1 /* * Is the pte executable? */ static inline int pte_x(pte_t pte) { return !(__pte_val(pte) & _PAGE_NX); } /* * All present user-pages with !NX bit are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte) && pte_x(pte); } /* * All present pages with !NX bit are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return pte_x(pte); } /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben */ #define __HAVE_ARCH_SET_PTE_ATOMIC static inline void set_pte(pte_t *ptep, pte_t pte) { ptep->pte_high = pte.pte_high; smp_wmb(); ptep->pte_low = pte.pte_low; } #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),__pte_val(pteval)) #define set_pte_at(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \ set_pte((ptep), (pteval)); \ } while (0) #define set_pte_at_sync(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \ set_pte((ptep), (pteval)); \ xen_invlpg((addr)); \ } \ } while (0) #define set_pmd(pmdptr,pmdval) \ xen_l2_entry_update((pmdptr), (pmdval)) #define set_pud(pudptr,pudval) \ xen_l3_entry_update((pudptr), (pudval)) /* * Pentium-II erratum A13: in PAE mode we explicitly have to flush * the TLB via cr3 if the top-level pgd is changed... * We do not let the generic code free and clear pgd entries due to * this erratum. */ static inline void pud_clear (pud_t * pud) { } #define pud_page(pud) \ ((struct page *) __va(pud_val(pud) & PAGE_MASK)) #define pud_page_kernel(pud) \ ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) /* Find an entry in the second-level page table.. */ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ pmd_index(address)) static inline int pte_none(pte_t pte) { return !(pte.pte_low | pte.pte_high); } /* * For PTEs and PDEs, we must clear the P-bit first when clearing a page table * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. 
*/ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { if ((mm != current->mm && mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) { ptep->pte_low = 0; smp_wmb(); ptep->pte_high = 0; } } #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (!pte_none(pte)) { if ((mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) { uint64_t val = __pte_val(pte); if (__cmpxchg64(ptep, val, 0) != val) { /* xchg acts as a barrier before the setting of the high bits */ pte.pte_low = xchg(&ptep->pte_low, 0); pte.pte_high = ptep->pte_high; ptep->pte_high = 0; } } } return pte; } #define ptep_clear_flush(vma, addr, ptep) \ ({ \ pte_t *__ptep = (ptep); \ pte_t __res = *__ptep; \ if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI))) { \ __ptep->pte_low = 0; \ smp_wmb(); \ __ptep->pte_high = 0; \ flush_tlb_page(vma, addr); \ } \ __res; \ }) static inline int pte_same(pte_t a, pte_t b) { return a.pte_low == b.pte_low && a.pte_high == b.pte_high; } #define pte_page(x) pfn_to_page(pte_pfn(x)) #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ ((_pte).pte_high << (32-PAGE_SHIFT))) #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte))) #define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \ (_pte).pte_low & _PAGE_PRESENT ? \ mfn_to_local_pfn(__pte_mfn(_pte)) : \ __pte_mfn(_pte)) extern unsigned long long __supported_pte_mask; static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { return __pte((((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & __supported_pte_mask); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & __supported_pte_mask); } /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) #define __pmd_free_tlb(tlb, x) do { } while (0) void vmalloc_sync_all(void); #endif /* _I386_PGTABLE_3LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/pgtable.h000066400000000000000000000444051314037446600266460ustar00rootroot00000000000000#ifndef _I386_PGTABLE_H #define _I386_PGTABLE_H #include /* * The Linux memory management assumes a three-level page table setup. On * the i386, we use that, but "fold" the mid level into the top-level page * table, so that we physically have the same two-level page table as the * i386 mmu expects. * * This file contains the functions and defines necessary to modify and use * the i386 page table tree. */ #ifndef __ASSEMBLY__ #include #include #include #ifndef _I386_BITOPS_H #include #endif #include #include #include /* Is this pagetable pinned? 
*/ #define PG_pinned PG_arch_1 struct mm_struct; struct vm_area_struct; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t *swapper_pg_dir; extern kmem_cache_t *pgd_cache; extern kmem_cache_t *pmd_cache; extern spinlock_t pgd_lock; extern struct page *pgd_list; void pmd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); /* * The Linux x86 paging architecture is 'compile-time dual-mode', it * implements both the traditional 2-level x86 page tables and the * newer 3-level PAE-mode page tables. */ #ifdef CONFIG_X86_PAE # include # define PMD_SIZE (1UL << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE-1)) #else # include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) #define TWOLEVEL_PGDIR_SHIFT 22 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) /* Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) #else # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif /* * _PAGE_PSE set in the page directory entry just means that * the page directory entry points directly to a 4MB-aligned block of * memory. */ #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ /*#define _PAGE_BIT_UNUSED1 9*/ /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ /*#define _PAGE_UNUSED1 0x200*/ /* available for programmer */ #define _PAGE_UNUSED2 0x400 #define _PAGE_UNUSED3 0x800 /* If _PAGE_PRESENT is clear, we use these: */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; pte_present gives true */ #ifdef CONFIG_X86_PAE #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) #else #define _PAGE_NX 0 #endif /* Mapped page is I/O or foreign and has no associated page struct. 
*/ #define _PAGE_IO 0x200 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO) #define PAGE_NONE \ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY \ PAGE_COPY_NOEXEC #define PAGE_READONLY \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define _PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define _PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) /* * The i386 can't do page protection for execute, and considers that * the same are read. Also, write permissions imply read permissions. * This is the closest we can get.. */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are * done without a 'access_ok(VERIFY_WRITE,..)' */ #undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)__pmd_val(x)) #if CONFIG_XEN_COMPAT <= 0x030002 /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t. can temporarily clear it. */ #define pmd_present(x) (__pmd_val(x)) #define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT)) #else #define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT) #define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #endif #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
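 *
 * Illustrative aside, not part of the original header: callers check
 * presence before consulting any of these bits, e.g.
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_dirty(pte))
 *		set_page_dirty(pte_page(pte));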
*/ static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } /* * The following only works if pte_present() is not true. */ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } #ifdef CONFIG_X86_PAE # include #else # include #endif #define ptep_test_and_clear_dirty(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_dirty(__pte); \ if (__ret) { \ __pte = pte_mkclean(__pte); \ if ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \ (ptep)->pte_low = __pte.pte_low; \ } \ __ret; \ }) #define ptep_test_and_clear_young(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_young(__pte); \ if (__ret) \ __pte = pte_mkold(__pte); \ if ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \ (ptep)->pte_low = __pte.pte_low; \ __ret; \ }) #define ptep_get_and_clear_full(mm, addr, ptep, full) \ ((full) ? ({ \ pte_t __res = *(ptep); \ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \ xen_l1_entry_update(ptep, __pte(0)); \ else \ *(ptep) = __pte(0); \ __res; \ }) : \ ptep_get_and_clear(mm, addr, ptep)) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (pte_write(pte)) set_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * * dst - pointer to pgd range anwhere on a pgd page * src - "" * count - the number of pgds to copy. * * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { memcpy(dst, src, count * sizeof(pgd_t)); } /* * Macro to mark a page protection value as "uncacheable". On processors which do not support * it, this is a no-op. */ #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
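 *
 * Illustrative aside, not part of the original header: the usual fault-path
 * pattern built on these converters is roughly
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	if (write_access)
 *		entry = pte_mkwrite(pte_mkdirty(entry));
 *	set_pte_at(mm, address, ptep, entry);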
*/ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { /* * Since this might change the present bit (which controls whether * a pte_t object has undergone p2m translation), we must use * pte_val() on the input pte and __pte() for the return value. */ paddr_t pteval = pte_val(pte); pteval &= _PAGE_CHG_MASK; pteval |= pgprot_val(newprot); #ifdef CONFIG_X86_PAE pteval &= __supported_pte_mask; #endif return __pte(pteval); } #define pmd_large(pmd) \ ((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * * this macro returns the index of the entry in the pgd page which would * control the given virtual address */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index_k(addr) pgd_index(addr) /* * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] * * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ #define pmd_index(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] * * this macro returns the index of the entry in the pte page which would * control the given virtual address */ #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) \ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page_kernel(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. */ extern pte_t *lookup_address(unsigned long address); /* * Make a given kernel text page executable/non-executable. * Returns the previous executability setting of that page (which * is used to restore the previous state). Used by the SMP bootup code. * NOTE: this is an __init function for security reasons. 
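 *
 * Illustrative aside, not part of the original header: the index/offset
 * macros defined above compose into the usual software page-table walk,
 * roughly
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 *
 * which is essentially the walk that lookup_address() performs.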
*/ #ifdef CONFIG_X86_PAE extern int set_kernel_exec(unsigned long vaddr, int enable); #else static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} #endif extern void noexec_setup(const char *str); #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \ pte_index(address)) #define pte_offset_map_nested(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \ pte_index(address)) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #endif #define __HAVE_ARCH_PTEP_ESTABLISH #define ptep_establish(vma, address, ptep, pteval) \ do { \ if ( likely((vma)->vm_mm == current->mm) ) { \ BUG_ON(HYPERVISOR_update_va_mapping(address, \ pteval, \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI)); \ } else { \ xen_l1_entry_update(ptep, pteval); \ flush_tlb_page(vma, address); \ } \ } while (0) /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. * * Also, we only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. */ #define update_mmu_cache(vma,address,pte) do { } while (0) #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ do { \ if (dirty) \ ptep_establish(vma, address, ptep, entry); \ } while (0) #include void make_lowmem_page_readonly(void *va, unsigned int feature); void make_lowmem_page_writable(void *va, unsigned int feature); void make_page_readonly(void *va, unsigned int feature); void make_page_writable(void *va, unsigned int feature); void make_pages_readonly(void *va, unsigned int nr, unsigned int feature); void make_pages_writable(void *va, unsigned int nr, unsigned int feature); #define virt_to_ptep(va) \ ({ \ pte_t *__ptep = lookup_address((unsigned long)(va)); \ BUG_ON(!__ptep || !pte_present(*__ptep)); \ __ptep; \ }) #define arbitrary_virt_to_machine(va) \ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \ | ((unsigned long)(va) & (PAGE_SIZE - 1))) #ifdef CONFIG_HIGHPTE #include struct page *kmap_atomic_to_page(void *); #define ptep_to_machine(ptep) \ ({ \ pte_t *__ptep = (ptep); \ page_to_phys(kmap_atomic_to_page(__ptep)) \ | ((unsigned long)__ptep & (PAGE_SIZE - 1)); \ }) #else #define ptep_to_machine(ptep) virt_to_machine(ptep) #endif #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ int direct_remap_pfn_range(struct vm_area_struct *vma, unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int direct_kernel_remap_pfn_range(unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int create_lookup_pte_addr(struct mm_struct *mm, unsigned long address, uint64_t *ptep); int touch_pte_range(struct mm_struct *mm, unsigned long address, unsigned long size); int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot); #define arch_change_pte_range(mm, pmd, 
addr, end, newprot) \ xen_change_pte_range(mm, pmd, addr, end, newprot) #define io_remap_pfn_range(vma,from,pfn,size,prot) \ direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME #include #endif /* _I386_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/processor.h000066400000000000000000000471431314037446600272510ustar00rootroot00000000000000/* * include/asm-i386/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_I386_PROCESSOR_H #define __ASM_I386_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* flag for disabling the tsc */ extern int tsc_disable; struct desc_struct { unsigned long a,b; }; #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. * Members of this structure are referenced in head.S, so think twice * before touching them. [mj] */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; char wp_works_ok; /* It doesn't on 386's */ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ char hard_math; char rfu; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ unsigned long x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB - valid for CPUS which support this call */ int x86_cache_alignment; /* In bytes */ char fdiv_bug; char f00f_bug; char coma_bug; char pad0; int x86_power; unsigned long loops_per_jiffy; #ifdef CONFIG_SMP cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif unsigned char x86_max_cores; /* cpuid returned max cores value */ unsigned char apicid; #ifdef CONFIG_SMP unsigned char booted_cores; /* number of cores as seen by OS */ __u8 phys_proc_id; /* Physical processor id. 
*/ __u8 cpu_core_id; /* Core id */ #endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 #define X86_VENDOR_NUM 9 #define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; #ifndef CONFIG_X86_NO_TSS extern struct tss_struct doublefault_tss; DECLARE_PER_CPU(struct tss_struct, init_tss); #endif #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); #else static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Generic CPUID function * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. 
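 *
 * Illustrative use, not part of the original header:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid(1, &eax, &ebx, &ecx, &edx);
 *	if (edx & (1 << 25))
 *		printk("CPU supports SSE\n");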
*/ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c"(0)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__(XEN_CPUID : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__(XEN_CPUID : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__(XEN_CPUID : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__(XEN_CPUID : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define load_cr3(pgdir) write_cr3(__pa(pgdir)) /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. 
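 *
 * Illustrative use, not part of the original header: CPU setup code enables
 * individual features through the helpers declared below, e.g.
 *
 *	if (cpu_has_fxsr)
 *		set_in_cr4(X86_CR4_OSFXSR);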
*/ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; write_cr4(cr4); } static inline void clear_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; write_cr4(cr4); } /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_PCR0 0x20 #define CX86_GCR 0xb8 #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_PCR1 0xf0 #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; /* Boot loader type from the setup header */ extern int bootloader_type; /* * User space process size: 3GB (default). */ #define TASK_SIZE (PAGE_OFFSET) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) #define HAVE_ARCH_PICK_MMAP_LAYOUT /* * Size of io_bitmap. 
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #ifndef CONFIG_X86_NO_TSS #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #endif #define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long mxcsr_mask; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; } __attribute__ ((aligned (16))); struct i387_soft_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ unsigned char ftop, changed, lookahead, no_update, rm, alimit; struct info *info; unsigned long entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; typedef struct { unsigned long seg; } mm_segment_t; struct thread_struct; #ifndef CONFIG_X86_NO_TSS struct tss_struct { unsigned short back_link,__blh; unsigned long esp0; unsigned short ss0,__ss0h; unsigned long esp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ unsigned long esp2; unsigned short ss2,__ss2h; unsigned long __cr3; unsigned long eip; unsigned long eflags; unsigned long eax,ecx,edx,ebx; unsigned long esp; unsigned long ebp; unsigned long esi; unsigned long edi; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; unsigned short ldt, __ldth; unsigned short trace, io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ unsigned long io_bitmap_max; struct thread_struct *io_bitmap_owner; /* * pads the TSS to be cacheline-aligned (size is 0x100) */ unsigned long __cacheline_filler[35]; /* * .. and then another 0x100 bytes for emergency kernel stack */ unsigned long stack[64]; } __attribute__((packed)); #endif #define ARCH_MIN_TASKALIGN 16 struct thread_struct { /* cached TLS descriptors. 
*/ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; unsigned long esp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ unsigned long debugreg[8]; /* %%db0-7 debug registers */ /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387; /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; unsigned long v86flags, v86mask, saved_esp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* max allowed port in the bitmap, in bytes: */ unsigned long io_bitmap_max; }; #define INIT_THREAD { \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } #ifndef CONFIG_X86_NO_TSS /* * Note that the .io_bitmap member must be extra-big. This is because * the CPU will access an additional byte beyond the end of the IO * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ #define INIT_TSS { \ .esp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->ss1 != thread->sysenter_cs)) { tss->ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } } #define load_esp0(tss, thread) \ __load_esp0(tss, thread) #else #define load_esp0(tss, thread) do { \ if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \ BUG(); \ } while (0) #endif #define start_thread(regs, new_eip, new_esp) do { \ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ regs->xss = __USER_DS; \ regs->xcs = __USER_CS; \ regs->eip = new_eip; \ regs->esp = new_esp; \ } while (0) /* * These special macros can be used to get or set a debugging register */ #define get_debugreg(var, register) \ (var) = HYPERVISOR_get_debugreg((register)) #define set_debugreg(value, register) \ WARN_ON(HYPERVISOR_set_debugreg((register), (value))) /* * Set IOPL bits in EFLAGS from given mask */ static inline void set_iopl_mask(unsigned mask) { struct physdev_set_iopl set_iopl; /* Force the change at ring 0. */ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); } /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) #define KSTK_TOP(info) \ ({ \ unsigned long *__ptr = (unsigned long *)(info); \ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ }) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. 
* This is necessary to guarantee that the entire "struct pt_regs" * is accessable even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). * Therefore beware: accessing the xss/esp fields of the * "struct pt_regs" is possible, but they may contain the * completely wrong values. */ #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ __regs__ - 1; \ }) #define KSTK_EIP(task) (task_pt_regs(task)->eip) #define KSTK_ESP(task) (task_pt_regs(task)->esp) struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } #define cpu_relax() rep_nop() /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" #define GENERIC_NOP2 ".byte 0x89,0xf6\n" #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 /* Opteron nops */ #define K8_NOP1 GENERIC_NOP1 #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 /* K7 nops */ /* uses eax dependencies (arbitary choice) */ #define K7_NOP1 GENERIC_NOP1 #define K7_NOP2 ".byte 0x8b,0xc0\n" #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" #define K7_NOP5 K7_NOP4 ASM_NOP1 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" #define K7_NOP8 K7_NOP7 ASM_NOP1 #ifdef CONFIG_MK8 #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 #elif defined(CONFIG_MK7) #define ASM_NOP1 K7_NOP1 #define ASM_NOP2 K7_NOP2 #define ASM_NOP3 K7_NOP3 #define ASM_NOP4 K7_NOP4 #define ASM_NOP5 K7_NOP5 #define ASM_NOP6 K7_NOP6 #define ASM_NOP7 K7_NOP7 #define ASM_NOP8 K7_NOP8 #else #define ASM_NOP1 GENERIC_NOP1 #define ASM_NOP2 GENERIC_NOP2 #define ASM_NOP3 GENERIC_NOP3 #define ASM_NOP4 GENERIC_NOP4 #define ASM_NOP5 GENERIC_NOP5 #define ASM_NOP6 GENERIC_NOP6 #define ASM_NOP7 GENERIC_NOP7 #define ASM_NOP8 GENERIC_NOP8 #endif #define ASM_NOP_MAX 8 /* Prefetch instructions for Pentium III and AMD Athlon */ /* It's not worth to care about 3dnow! prefetches for the K6 because they are microcoded there and very slow. However we don't do prefetches for pre XP Athlons currently That should be fixed. 
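rep_nop()/cpu_relax() above emit "rep;nop", which is the PAUSE instruction, so busy-wait loops are cheaper on hyperthreaded CPUs and friendlier to the memory pipeline. A minimal sketch of the usual polling pattern, assuming kernel context; the flag variable is a made-up example, not something defined in this header:

static volatile int ready;              /* hypothetical flag set by another CPU or an IRQ handler */

static void wait_for_ready(void)
{
        while (!ready)
                cpu_relax();            /* expands to "rep;nop" (PAUSE) as defined above */
}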
*/ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *x) { alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM, "r" (x)); } #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH /* 3dnow! prefetch to get an exclusive cache line. Useful for spinlocks to avoid one state transition in the cache coherency protocol. */ static inline void prefetchw(const void *x) { alternative_input(ASM_NOP4, "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define spin_lock_prefetch(x) prefetchw(x) extern void select_idle_routine(const struct cpuinfo_x86 *c); #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); #endif /* __ASM_I386_PROCESSOR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/ptrace.h000066400000000000000000000040431314037446600265000ustar00rootroot00000000000000#ifndef _I386_PTRACE_H #define _I386_PTRACE_H #define EBX 0 #define ECX 1 #define EDX 2 #define ESI 3 #define EDI 4 #define EBP 5 #define EAX 6 #define DS 7 #define ES 8 #define FS 9 #define GS 10 #define ORIG_EAX 11 #define EIP 12 #define CS 13 #define EFL 14 #define UESP 15 #define SS 16 #define FRAME_SIZE 17 /* this struct defines the way the registers are stored on the stack during a system call. */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 #define PTRACE_OLDSETOPTIONS 21 #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 #ifdef __KERNEL__ #include struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); /* * user_mode_vm(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check * if they have already ruled out V8086 mode, so user_mode(regs) can be used. */ static inline int user_mode(struct pt_regs *regs) { return (regs->xcs & 2) != 0; } static inline int user_mode_vm(struct pt_regs *regs) { return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0; } #define instruction_pointer(regs) ((regs)->eip) #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) extern unsigned long profile_pc(struct pt_regs *regs); #else #define profile_pc(regs) instruction_pointer(regs) #endif #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/scatterlist.h000066400000000000000000000011301314037446600275550ustar00rootroot00000000000000#ifndef _I386_SCATTERLIST_H #define _I386_SCATTERLIST_H struct scatterlist { struct page *page; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; }; /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg * returns. 
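user_mode() in the ptrace.h above tests bit 1 of the saved CS (xcs & 2) rather than the usual RPL == 3 check, because under i386 Xen PV the kernel itself runs outside ring 0; user_mode_vm() additionally folds in the VM86 case. A hedged sketch of how a trap handler might use them, assuming kernel context (printk and current come from the usual kernel headers; the function name is hypothetical):

/* Hedged sketch: deciding whether a trap came from user space before signalling it. */
static void example_debug_trap(struct pt_regs *regs, int error_code)
{
        if (user_mode_vm(regs))                         /* covers VM86 as well as RPL-3 code */
                send_sigtrap(current, regs, error_code); /* declared above */
        else
                printk("kernel-mode trap at %08lx\n", instruction_pointer(regs));
}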
*/ #define sg_dma_address(sg) ((sg)->dma_address) #define sg_dma_len(sg) ((sg)->dma_length) #define ISA_DMA_THRESHOLD (0x00ffffff) #endif /* !(_I386_SCATTERLIST_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/segment.h000066400000000000000000000065351314037446600266740ustar00rootroot00000000000000#ifndef _ASM_SEGMENT_H #define _ASM_SEGMENT_H /* * The layout of the per-CPU GDT under Linux: * * 0 - null * 1 - reserved * 2 - reserved * 3 - reserved * * 4 - unused <==== new cacheline * 5 - unused * * ------- start of TLS (Thread-Local Storage) segments: * * 6 - TLS segment #1 [ glibc's TLS segment ] * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] * 8 - TLS segment #3 * 9 - reserved * 10 - reserved * 11 - reserved * * ------- start of kernel segments: * * 12 - kernel code segment <==== new cacheline * 13 - kernel data segment * 14 - default user CS * 15 - default user DS * 16 - TSS * 17 - LDT * 18 - PNPBIOS support (16->32 gate) * 19 - PNPBIOS support * 20 - PNPBIOS support * 21 - PNPBIOS support * 22 - PNPBIOS support * 23 - APM BIOS support * 24 - APM BIOS support * 25 - APM BIOS support * * 26 - ESPFIX small SS * 27 - unused * 28 - unused * 29 - unused * 30 - unused * 31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_ENTRIES 3 #define GDT_ENTRY_TLS_MIN 6 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #define GDT_ENTRY_DEFAULT_USER_CS 14 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) #define GDT_ENTRY_DEFAULT_USER_DS 15 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) #define GDT_ENTRY_KERNEL_BASE 12 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) #define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) ) #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) #define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) ) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* * The GDT has 32 entries */ #define GDT_ENTRIES 32 #define GDT_SIZE (GDT_ENTRIES * 8) /* Simple and small GDT entries for booting only */ #define GDT_ENTRY_BOOT_CS 2 #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) /* The PnP BIOS entries in the GDT */ #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) #define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) /* The PnP BIOS selectors */ #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number * of tasks we 
can have.. */ #define IDT_ENTRIES 256 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/setup.h000066400000000000000000000055071314037446600263700ustar00rootroot00000000000000/* * Just a place holder. We don't want to have to test x86 before * we include stuff */ #ifndef _i386_SETUP_H #define _i386_SETUP_H #ifdef __KERNEL__ #include /* * Reserved space for vmalloc and iomap - defined in asm/page.h */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) #endif #define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 256 #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F #define OLD_CL_BASE_ADDR 0x90000 #define OLD_CL_OFFSET 0x90022 #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #ifndef __ASSEMBLY__ /* * This is set up by the setup-routine at boot-time */ extern unsigned char boot_params[PARAM_SIZE]; #define PARAM (boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define IST_INFO (*(struct ist_info *) (PARAM+0x60)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) #define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0))) #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (__pa(xen_start_info->mod_start)) #define INITRD_SIZE (xen_start_info->mod_len) #define EDID_INFO (*(struct edid_info *) (PARAM+0x440)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) /* * Do NOT EVER look at the BIOS memory size location. * It does not work on many machines. 
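The PARAM accessors in setup.h above read fields straight out of the boot_params block, while INITRD_START/INITRD_SIZE are redirected to the Xen start_info page instead of the real-mode boot block. A hedged sketch of consuming a few of them, assuming kernel context (printk comes from linux/kernel.h; the function name is made up):

/* Hedged sketch: peeking at a few of the PARAM fields defined above. */
static void __init dump_some_boot_params(void)
{
        printk("video mode 0x%04x, loader type %u\n", VIDEO_MODE, LOADER_TYPE);
        printk("initrd at %08lx, %lu bytes\n", INITRD_START, INITRD_SIZE);
}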
*/ #define LOWMEMSIZE() (0x9f000) struct e820entry; char * __init machine_specific_memory_setup(void); int __init copy_e820_map(struct e820entry * biosmap, int nr_map); int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); void __init add_memory_region(unsigned long long start, unsigned long long size, int type); #endif /* __ASSEMBLY__ */ #endif /* _i386_SETUP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/smp.h000066400000000000000000000044631314037446600260270ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #endif #endif #define BAD_APICID 0xFFu #ifdef CONFIG_SMP #ifndef __ASSEMBLY__ /* * Private routines/data */ extern void smp_alloc_memory(void); extern int pic_mode; extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); #define MAX_APICID 256 extern u8 x86_cpu_to_apicid[]; #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); #endif /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ #define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_possible_map; #define cpu_callin_map cpu_possible_map /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { return cpus_weight(cpu_possible_map); } #ifdef CONFIG_X86_LOCAL_APIC #ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); #else #include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } #endif static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); #endif /* !__ASSEMBLY__ */ #else /* CONFIG_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/spinlock.h000066400000000000000000000111111314037446600270360ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include #include #include #include /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * * We make no fairness assumptions. They have a cost. 
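In the smp.h above, raw_smp_processor_id() simply reads current_thread_info()->cpu, and num_booting_cpus() counts cpu_possible_map (which cpu_callin_map is aliased to here). A hedged sketch, assuming kernel context and the usual printk header; the function name is illustrative:

/* Hedged sketch: reporting the current CPU and how many CPUs came up. */
static void report_cpus(void)
{
        printk("running on cpu%d, %d CPUs booted\n",
               raw_smp_processor_id(), num_booting_cpus());
}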
* * (the type definitions are in asm/spinlock_types.h) */ #define __raw_spin_is_locked(x) \ (*(volatile signed char *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ LOCK_PREFIX " ; decb %0\n\t" \ "jns 3f\n" \ "2:\t" \ "rep;nop\n\t" \ "cmpb $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ "3:\n\t" /* * NOTE: there's an irqs-on section here, which normally would have to be * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use * __raw_spin_lock_string_flags(). */ #define __raw_spin_lock_string_flags \ "\n1:\t" \ LOCK_PREFIX " ; decb %0\n\t" \ "jns 5f\n" \ "2:\t" \ "testl $0x200, %1\n\t" \ "jz 4f\n\t" \ "#sti\n" \ "3:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jle 3b\n\t" \ "#cli\n\t" \ "jmp 1b\n" \ "4:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jg 1b\n\t" \ "jmp 4b\n" \ "5:\n\t" static inline void __raw_spin_lock(raw_spinlock_t *lock) { asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory"); } /* * It is easier for the lock validator if interrupts are not re-enabled * in the middle of a lock-acquire. This is a performance feature anyway * so we turn it off: */ #ifndef CONFIG_PROVE_LOCKING static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory"); } #endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval; __asm__ __volatile__( "xchgb %b0,%1" :"=q" (oldval), "+m" (lock->slock) :"0" (0) : "memory"); return oldval > 0; } /* * __raw_spin_unlock based on writing $1 to the low byte. * This method works. Despite all the confusion. * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) * (PPro errata 66, 92) */ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) #define __raw_spin_unlock_string \ "movb $1,%0" \ :"+m" (lock->slock) : : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_unlock_string ); } #else #define __raw_spin_unlock_string \ "xchgb %b0, %1" \ :"=q" (oldval), "+m" (lock->slock) \ :"0" (oldval) : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { char oldval = 1; __asm__ __volatile__( __raw_spin_unlock_string ); } #endif #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) /* * Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. * * The inline assembly is non-obvious. Think about it. * * Changed to use the same technique as rw semaphores. See * semaphore.h for details. -ben * * the helpers are in arch/i386/kernel/semaphore.c */ /** * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ #define __raw_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
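The __raw_spin_lock_string assembly above implements a byte lock: slock is initialised to 1, a LOCK-prefixed "decb" takes it to 0 on acquisition, and any value <= 0 means contention, so the waiter spins with "rep;nop" until the byte goes positive and then retries. The following is only a plain-C rendering of that protocol for readability; it is not atomic and not usable as a real lock:

/* Hedged, non-atomic illustration of the byte-lock protocol used above. */
static void byte_lock(volatile signed char *slock)
{
        for (;;) {
                signed char newval = --(*slock);        /* the real code uses a LOCK-prefixed "decb" */
                if (newval >= 0)
                        return;                         /* was 1 (unlocked), now 0: we own it */
                while (*slock <= 0)
                        cpu_relax();                    /* "rep;nop" while somebody else holds it */
        }
}

static void byte_unlock(volatile signed char *slock)
{
        *slock = 1;                                     /* __raw_spin_unlock: a plain byte store of 1 */
}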
*/ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) static inline void __raw_read_lock(raw_rwlock_t *rw) { __build_read_lock(rw, "__read_lock_failed"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { __build_write_lock(rw, "__write_lock_failed"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); if (atomic_read(count) >= 0) return 1; atomic_inc(count); return 0; } static inline int __raw_write_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) return 1; atomic_add(RW_LOCK_BIAS, count); return 0; } static inline void __raw_read_unlock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" : "+m" (rw->lock) : : "memory"); } #endif /* __ASM_SPINLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/swiotlb.h000066400000000000000000000031251314037446600267050ustar00rootroot00000000000000#ifndef _ASM_SWIOTLB_H #define _ASM_SWIOTLB_H 1 /* SWIOTLB interface */ extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); #ifdef CONFIG_HIGHMEM extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction); #endif extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); #ifdef CONFIG_SWIOTLB extern int swiotlb; #else #define swiotlb 0 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/synch_bitops.h000066400000000000000000000062311314037446600277270ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
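The swiotlb entry points declared above follow the usual map/sync/unmap round trip. A hedged sketch of a single outbound transfer, assuming kernel context (DMA_TO_DEVICE and -EIO come from the standard dma-mapping and errno headers; the device, buffer and function name are placeholders):

/* Hedged sketch: one DMA write through the swiotlb helpers declared above. */
static int example_dma_write(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (swiotlb_dma_mapping_error(handle))
                return -EIO;                            /* mapping failed */
        /* ... hand 'handle' to the hardware ... */
        swiotlb_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
        swiotlb_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}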
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/system.h000066400000000000000000000323321314037446600265500ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #include /* for LOCK_PREFIX */ #include #include #ifdef __KERNEL__ struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. 
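The synch_* bit operations above are always LOCK-prefixed, so they stay safe on memory shared with the hypervisor or with other guests even in UP builds where the plain bitops may drop the lock prefix. A hedged sketch of using them on a shared word; the variable, the bit number and the function are made up for illustration:

/* Hedged sketch: claiming and releasing a slot bit in memory shared with another domain. */
static unsigned long shared_flags;      /* hypothetical word visible to the other side */

static int claim_slot(void)
{
        if (synch_test_and_set_bit(3, &shared_flags))
                return 0;               /* somebody else already owns slot 3 */
        /* ... use the slot ... */
        synch_clear_bit(3, &shared_flags);
        return 1;
}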
*/ #define switch_to(prev,next,last) do { \ unsigned long esi,edi; \ asm volatile("pushfl\n\t" /* Save flags */ \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ "movl %5,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ "pushl %6\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popfl" \ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ "=a" (last),"=S" (esi),"=D" (edi) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ "2" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %%dl,%2\n\t" \ "movb %%dh,%3" \ :"=&d" (__pr) \ :"m" (*((addr)+2)), \ "m" (*((addr)+4)), \ "m" (*((addr)+7)), \ "0" (base) \ ); } while(0) #define _set_limit(addr,limit) do { unsigned long __lr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %2,%%dh\n\t" \ "andb $0xf0,%%dh\n\t" \ "orb %%dh,%%dl\n\t" \ "movb %%dl,%2" \ :"=&d" (__lr) \ :"m" (*(addr)), \ "m" (*((addr)+6)), \ "0" (limit) \ ); } while(0) #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "mov %0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "pushl $0\n\t" \ "popl %%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 4\n\t" \ ".long 1b,3b\n" \ ".previous" \ : :"rm" (value)) /* * Save a segment register away */ #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) #define read_cr0() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr0,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr0(x) \ __asm__ __volatile__("movl %0,%%cr0": :"r" (x)) #define read_cr2() (current_vcpu_info()->arch.cr2) #define write_cr2(x) \ __asm__ __volatile__("movl %0,%%cr2": :"r" (x)) #define read_cr3() ({ \ unsigned int __dummy; \ __asm__ ( \ "movl %%cr3,%0\n\t" \ :"=r" (__dummy)); \ __dummy = xen_cr3_to_pfn(__dummy); \ mfn_to_pfn(__dummy) << PAGE_SHIFT; \ }) #define write_cr3(x) ({ \ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \ __dummy = xen_pfn_to_cr3(__dummy); \ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \ }) #define read_cr4() ({ \ unsigned int __dummy; \ __asm__( \ "movl %%cr4,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define read_cr4_safe() ({ \ unsigned int __dummy; \ /* This could fault if %cr4 does not exist */ \ __asm__("1: movl %%cr4, %0 \n" \ "2: \n" \ ".section __ex_table,\"a\" \n" \ ".long 1b,2b \n" \ ".previous \n" \ : "=r" (__dummy): "0" (0)); \ __dummy; \ }) #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) /* * Clear and set 'TS' bit respectively */ #define clts() (HYPERVISOR_fpu_taskswitch(0)) #define stts() (HYPERVISOR_fpu_taskswitch(1)) #endif /* __KERNEL__ */ #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory") static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; __asm__("lsll %1,%0" :"=r" (__limit):"r" (segment)); return __limit+1; } #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) #ifdef CONFIG_X86_CMPXCHG64 /* * The semantics of XCHGCMP8B are 
a bit strange, this is why * there is a loop and the loading of %%eax and %%edx has to * be inside. This inlines well in most cases, the cached * cost is around ~38 cycles. (in the future we might want * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that * might have an implicit FPU-save as a cost, so it's not * clear which path to go.) * * cmpxchg8b must be used with the lock prefix here to allow * the instruction to be executed atomically, see page 3-102 * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. */ static inline void __set_64bit (unsigned long long * ptr, unsigned int low, unsigned int high) { __asm__ __volatile__ ( "\n1:\t" "movl (%0), %%eax\n\t" "movl 4(%0), %%edx\n\t" "lock cmpxchg8b (%0)\n\t" "jnz 1b" : /* no outputs */ : "D"(ptr), "b"(low), "c"(high) : "ax","dx","memory"); } static inline void __set_64bit_constant (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); } #define ll_low(x) *(((unsigned int*)&(x))+0) #define ll_high(x) *(((unsigned int*)&(x))+1) static inline void __set_64bit_var (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,ll_low(value), ll_high(value)); } #define set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit_constant(ptr, value) : \ __set_64bit_var(ptr, value) ) #define _set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ __set_64bit(ptr, ll_low(value), ll_high(value)) ) #endif /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to * simulate the cmpxchg on the 80386 CPU. For that purpose we define * a function for each of the sizes we support. 
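The cmpxchg() macro above compiles to a LOCK CMPXCHG of the matching operand width, and its classic use is a compare-and-swap retry loop. A hedged sketch, assuming kernel context; the counter and limit are illustrative:

/* Hedged sketch: lock-free saturating increment built on the cmpxchg() macro above. */
static void saturating_inc(unsigned long *counter, unsigned long limit)
{
        unsigned long old, newval;

        do {
                old = *counter;
                if (old >= limit)
                        return;                         /* already saturated, nothing to do */
                newval = old + 1;
        } while (cmpxchg(counter, old, newval) != old); /* retry if another CPU raced us */
}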
*/ extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, unsigned long new, int size) { switch (size) { case 1: return cmpxchg_386_u8(ptr, old, new); case 2: return cmpxchg_386_u16(ptr, old, new); case 4: return cmpxchg_386_u32(ptr, old, new); } return old; } #define cmpxchg(ptr,o,n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ __ret = __cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ else \ __ret = cmpxchg_386((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ __ret; \ }) #endif #ifdef CONFIG_X86_CMPXCHG64 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, unsigned long long new) { unsigned long long prev; __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" : "=A"(prev) : "b"((unsigned long)new), "c"((unsigned long)(new >> 32)), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } #define cmpxchg64(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ (unsigned long long)(n))) #endif /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. * * For now, "wmb()" doesn't actually do anything, as all * Intel CPU's follow what Intel calls a *Processor Order*, * in which all writes are seen in the program order even * outside the CPU. * * I expect future Intel CPU's to have a weaker ordering, * but I'd also expect them to finally get their act together * and add some real memory barriers if so. * * Some non intel clones support out of order store. wmb() ceases to be a * nop for these. */ /* * Actually only lfence would be needed for mb() because all stores done * by the kernel should be already ordered. But keep a full barrier for now. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) /** * read_barrier_depends - Flush all pending reads that subsequents reads * depend on. * * No data-dependent reads from memory-like regions are ever reordered * over this barrier. All reads preceding this primitive are guaranteed * to access memory (but not necessarily other CPUs' caches) before any * reads following this primitive that depend on the data return by * any of the preceding reads. This primitive is much lighter weight than * rmb() on most CPUs, and is never heavier weight than is * rmb(). * * These ordering constraints are respected by both the local CPU * and the compiler. * * Ordering is not guaranteed by anything other than these primitives, * not even by data dependencies. See the documentation for * memory_barrier() for examples and URLs to more information. * * For example, the following code would force ordering (the initial * value of "a" is zero, "b" is one, and "p" is "&a"): * * * CPU 0 CPU 1 * * b = 2; * memory_barrier(); * p = &b; q = p; * read_barrier_depends(); * d = *q; * * * because the read of "*q" depends on the read of "p" and these * two reads are separated by a read_barrier_depends(). However, * the following code, with the same initial values for "a" and "b": * * * CPU 0 CPU 1 * * a = 2; * memory_barrier(); * b = 3; y = b; * read_barrier_depends(); * x = a; * * * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". 
Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() * in cases like this where there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) #ifdef CONFIG_X86_OOSTORE /* Actually there are no OOO store capable CPUs for now that do SSE, but make it already an possibility. */ #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) #else #define wmb() __asm__ __volatile__ ("": : :"memory") #endif #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif #include /* * disable hlt during certain critical i/o operations */ #define HAVE_DISABLE_HLT void disable_hlt(void); void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible: */ static inline void sched_cacheflush(void) { wbinvd(); } extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/tlbflush.h000066400000000000000000000047751314037446600270610ustar00rootroot00000000000000#ifndef _I386_TLBFLUSH_H #define _I386_TLBFLUSH_H #include #include #define __flush_tlb() xen_tlb_flush() #define __flush_tlb_global() xen_tlb_flush() #define __flush_tlb_all() xen_tlb_flush() extern unsigned long pgkern_mask; #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #define __flush_tlb_single(addr) xen_invlpg(addr) #define __flush_tlb_one(addr) __flush_tlb_single(addr) /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. 
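The read_barrier_depends() documentation above describes the pointer-publication pattern in prose; spelled out as C it looks like the sketch below, using smp_wmb() on the producer side (the variable names a, b, p, q follow that comment; this is an illustration, not code from the header):

/* Hedged sketch of the pointer-publication pattern described above. */
static int a, b = 1;
static int *p = &a;

static void producer(void)
{
        b = 2;
        smp_wmb();                      /* order the store to b before the store to p */
        p = &b;
}

static int consumer(void)
{
        int *q = p;
        smp_read_barrier_depends();     /* needed on Alpha; compiles to nothing on i386 */
        return *q;                      /* sees 2 once q observes &b */
}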
*/ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() #define flush_tlb_all xen_tlb_flush_all #define flush_tlb_current_task() xen_tlb_flush_mask(¤t->mm->cpu_vm_mask) #define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask) #define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va) #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; }; DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* i386 does not keep any page table caches in TLB */ } #endif /* _I386_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/vga.h000066400000000000000000000005761314037446600260060ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/asm/xenoprof.h000066400000000000000000000034651314037446600270710ustar00rootroot00000000000000/****************************************************************************** * asm-i386/mach-xen/asm/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __ASM_XENOPROF_H__ #define __ASM_XENOPROF_H__ #ifdef CONFIG_XEN struct super_block; struct dentry; int xenoprof_create_files(struct super_block * sb, struct dentry * root); #define HAVE_XENOPROF_CREATE_FILES struct xenoprof_init; void xenoprof_arch_init_counter(struct xenoprof_init *init); void xenoprof_arch_counter(void); void xenoprof_arch_start(void); void xenoprof_arch_stop(void); struct xenoprof_arch_shared_buffer { /* nothing */ }; struct xenoprof_shared_buffer; void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf); struct xenoprof_get_buffer; int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf); struct xenoprof_passive; int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf); #endif /* CONFIG_XEN */ #endif /* __ASM_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/irq_vectors.h000066400000000000000000000067221314037446600270100ustar00rootroot00000000000000/* * This file should contain #defines for all of the interrupt vector * numbers used by this architecture. * * In addition, there are some standard defines: * * FIRST_EXTERNAL_VECTOR: * The first free place for external interrupts * * SYSCALL_VECTOR: * The IRQ vector a syscall makes the user to kernel transition * under. * * TIMER_IRQ: * The IRQ number the timer interrupt comes in at. * * NR_IRQS: * The total number of interrupt vectors (including all the * architecture specific interrupts) needed. * */ #ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ #if 0 /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. * * Vectors 0xf0-0xfa are free (reserved for future Linux use). */ #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xf0 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef #endif #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef /* * 16 8259A IRQ's, 208 potential APIC interrupt sources. * Right now the APIC is mostly only used for SMP. * 256 vectors is an architectural limit. 
(we can have * more than 256 devices theoretically, but they will * have to use shared interrupts) * Since vectors 0x00-0x1f are used/reserved for the CPU, * the usable vector space is 0x20-0xff (224 vectors) */ #define RESCHEDULE_VECTOR 0 #define CALL_FUNCTION_VECTOR 1 #define NR_IPIS 2 /* * The maximum number of vectors supported by i386 processors * is limited to 256. For processors other than i386, NR_VECTORS * should be changed accordingly. */ #define NR_VECTORS 256 #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) /* * The flat IRQ space is divided into two regions: * 1. A one-to-one mapping of real physical IRQs. This space is only used * if we have physical device-access privilege. This region is at the * start of the IRQ space so that existing device drivers do not need * to be modified to translate physical IRQ numbers into our IRQ space. * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These * are bound using the provided bind/unbind functions. */ #define PIRQ_BASE 0 #if !defined(MAX_IO_APICS) # define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS) #elif NR_CPUS < MAX_IO_APICS # define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS) #else # define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS) #endif #define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS) #define NR_DYNIRQS 256 #define NR_IRQS (NR_PIRQS + NR_DYNIRQS) #define NR_IRQ_VECTORS NR_IRQS #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/mach_traps.h000066400000000000000000000014221314037446600265610ustar00rootroot00000000000000/* * include/asm-xen/asm-i386/mach-xen/mach_traps.h * * Machine specific NMI handling for Xen */ #ifndef _MACH_TRAPS_H #define _MACH_TRAPS_H #include #include static inline void clear_mem_error(unsigned char reason) {} static inline void clear_io_check_error(unsigned char reason) {} static inline unsigned char get_nmi_reason(void) { shared_info_t *s = HYPERVISOR_shared_info; unsigned char reason = 0; /* construct a value which looks like it came from * port 0x61. */ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason)) reason |= 0x40; if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason)) reason |= 0x80; return reason; } static inline void reassert_nmi(void) {} #endif /* !_MACH_TRAPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mach-xen/setup_arch.h000066400000000000000000000002231314037446600265730ustar00rootroot00000000000000/* Hook to call BIOS initialisation function */ #define ARCH_SETUP machine_specific_arch_setup(); void __init machine_specific_arch_setup(void); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/math_emu.h000066400000000000000000000013631314037446600245430ustar00rootroot00000000000000#ifndef _I386_MATH_EMU_H #define _I386_MATH_EMU_H #include int restore_i387_soft(void *s387, struct _fpstate __user *buf); int save_i387_soft(void *s387, struct _fpstate __user *buf); /* This structure matches the layout of the data saved to the stack following a device-not-present interrupt, part of it saved automatically by the 80386/80486. 
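get_nmi_reason() in mach_traps.h above synthesizes the value normally read from port 0x61 out of the nmi_reason bits that Xen publishes in the shared info page: 0x80 for a parity error, 0x40 for an I/O check error. A hedged sketch of a consumer, assuming kernel context (printk from linux/kernel.h; the handler name is made up):

/* Hedged sketch: decoding the synthesized "port 0x61" value from get_nmi_reason(). */
static void example_nmi(void)
{
        unsigned char reason = get_nmi_reason();

        if (reason & 0x80)
                printk("NMI: memory parity error reported by Xen\n");
        if (reason & 0x40)
                printk("NMI: I/O check error reported by Xen\n");
}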
*/ struct info { long ___orig_eip; long ___ebx; long ___ecx; long ___edx; long ___esi; long ___edi; long ___ebp; long ___eax; long ___ds; long ___es; long ___orig_eax; long ___eip; long ___cs; long ___eflags; long ___esp; long ___ss; long ___vm86_es; /* This and the following only in vm86 mode */ long ___vm86_ds; long ___vm86_fs; long ___vm86_gs; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mc146818rtc.h000066400000000000000000000051031314037446600245440ustar00rootroot00000000000000/* * Machine dependent access functions for RTC registers. */ #ifndef _ASM_MC146818RTC_H #define _ASM_MC146818RTC_H #include #include #include #ifndef RTC_PORT #define RTC_PORT(x) (0x70 + (x)) #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ #endif #ifdef __HAVE_ARCH_CMPXCHG /* * This lock provides nmi access to the CMOS/RTC registers. It has some * special properties. It is owned by a CPU and stores the index register * currently being accessed (if owned). The idea here is that it works * like a normal lock (normally). However, in an NMI, the NMI code will * first check to see if its CPU owns the lock, meaning that the NMI * interrupted during the read/write of the device. If it does, it goes ahead * and performs the access and then restores the index register. If it does * not, it locks normally. * * Note that since we are working with NMIs, we need this lock even in * a non-SMP machine just to mark that the lock is owned. * * This only works with compare-and-swap. There is no other way to * atomically claim the lock and set the owner. */ #include extern volatile unsigned long cmos_lock; /* * All of these below must be called with interrupts off, preempt * disabled, etc. */ static inline void lock_cmos(unsigned char reg) { unsigned long new; new = ((smp_processor_id()+1) << 8) | reg; for (;;) { if (cmos_lock) continue; if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) return; } } static inline void unlock_cmos(void) { cmos_lock = 0; } static inline int do_i_have_lock_cmos(void) { return (cmos_lock >> 8) == (smp_processor_id()+1); } static inline unsigned char current_lock_cmos_reg(void) { return cmos_lock & 0xff; } #define lock_cmos_prefix(reg) \ do { \ unsigned long cmos_flags; \ local_irq_save(cmos_flags); \ lock_cmos(reg) #define lock_cmos_suffix(reg) \ unlock_cmos(); \ local_irq_restore(cmos_flags); \ } while (0) #else #define lock_cmos_prefix(reg) do {} while (0) #define lock_cmos_suffix(reg) do {} while (0) #define lock_cmos(reg) #define unlock_cmos() #define do_i_have_lock_cmos() 0 #define current_lock_cmos_reg() 0 #endif /* * The yet supported machines all access the RTC index register via * an ISA port access but the way to access the date register differs ... */ #define CMOS_READ(addr) rtc_cmos_read(addr) #define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) unsigned char rtc_cmos_read(unsigned char addr); void rtc_cmos_write(unsigned char val, unsigned char addr); #define RTC_IRQ 8 #endif /* _ASM_MC146818RTC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mca.h000066400000000000000000000023221314037446600235000ustar00rootroot00000000000000/* -*- mode: c; c-basic-offset: 8 -*- */ /* Platform specific MCA defines */ #ifndef _ASM_MCA_H #define _ASM_MCA_H /* Maximal number of MCA slots - actually, some machines have less, but * they all have sufficient number of POS registers to cover 8. */ #define MCA_MAX_SLOT_NR 8 /* Most machines have only one MCA bus. 
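lock_cmos_prefix()/lock_cmos_suffix() above bracket an RTC index/data access so that an NMI on the same CPU can detect the in-flight index register and restore it; CMOS_READ() resolves to rtc_cmos_read() on this platform. A hedged sketch of reading one register (the register number is caller-supplied; the wrapper name is illustrative):

/* Hedged sketch: reading an RTC/CMOS register under the NMI-aware lock above. */
static unsigned char read_cmos_reg(unsigned char reg)
{
        unsigned char val;

        lock_cmos_prefix(reg);
        val = CMOS_READ(reg);           /* rtc_cmos_read(reg) on this platform */
        lock_cmos_suffix(reg);
        return val;
}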
The only multiple bus machines * I know have at most two */ #define MAX_MCA_BUSSES 2 #define MCA_PRIMARY_BUS 0 #define MCA_SECONDARY_BUS 1 /* Dummy slot numbers on primary MCA for integrated functions */ #define MCA_INTEGSCSI (MCA_MAX_SLOT_NR) #define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1) #define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2) /* Dummy POS values for integrated functions */ #define MCA_DUMMY_POS_START 0x10000 #define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1) #define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2) #define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3) /* MCA registers */ #define MCA_MOTHERBOARD_SETUP_REG 0x94 #define MCA_ADAPTER_SETUP_REG 0x96 #define MCA_POS_REG(n) (0x100+(n)) #define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */ /* Max number of adapters, including both slots and various integrated * things. */ #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mca_dma.h000066400000000000000000000121571314037446600243300ustar00rootroot00000000000000#ifndef MCA_DMA_H #define MCA_DMA_H #include #include /* * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to * standard PC dma, but it certainly has its quirks. DMA register addresses * are in a different place and there are some added functions. Most of this * should be pretty obvious on inspection. Note that the user must divide * count by 2 when using 16-bit dma; that is not handled by these functions. * * Ramen Noodles are yummy. * * 1998 Tymm Twillman */ /* * Registers that are used by the DMA controller; FN is the function register * (tell the controller what to do) and EXE is the execution register (how * to do it) */ #define MCA_DMA_REG_FN 0x18 #define MCA_DMA_REG_EXE 0x1A /* * Functions that the DMA controller can do */ #define MCA_DMA_FN_SET_IO 0x00 #define MCA_DMA_FN_SET_ADDR 0x20 #define MCA_DMA_FN_GET_ADDR 0x30 #define MCA_DMA_FN_SET_COUNT 0x40 #define MCA_DMA_FN_GET_COUNT 0x50 #define MCA_DMA_FN_GET_STATUS 0x60 #define MCA_DMA_FN_SET_MODE 0x70 #define MCA_DMA_FN_SET_ARBUS 0x80 #define MCA_DMA_FN_MASK 0x90 #define MCA_DMA_FN_RESET_MASK 0xA0 #define MCA_DMA_FN_MASTER_CLEAR 0xD0 /* * Modes (used by setting MCA_DMA_FN_MODE in the function register) * * Note that the MODE_READ is read from memory (write to device), and * MODE_WRITE is vice-versa. */ #define MCA_DMA_MODE_XFER 0x04 /* read by default */ #define MCA_DMA_MODE_READ 0x04 /* same as XFER */ #define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */ #define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */ #define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */ /** * mca_enable_dma - channel to enable DMA on * @dmanr: DMA channel * * Enable the MCA bus DMA on a channel. This can be called from * IRQ context. */ static __inline__ void mca_enable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); } /** * mca_disble_dma - channel to disable DMA on * @dmanr: DMA channel * * Enable the MCA bus DMA on a channel. This can be called from * IRQ context. */ static __inline__ void mca_disable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); } /** * mca_set_dma_addr - load a 24bit DMA address * @dmanr: DMA channel * @a: 24bit bus address * * Load the address register in the DMA controller. This has a 24bit * limitation (16Mb). 
*/ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) { outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); outb(a & 0xff, MCA_DMA_REG_EXE); outb((a >> 8) & 0xff, MCA_DMA_REG_EXE); outb((a >> 16) & 0xff, MCA_DMA_REG_EXE); } /** * mca_get_dma_addr - load a 24bit DMA address * @dmanr: DMA channel * * Read the address register in the DMA controller. This has a 24bit * limitation (16Mb). The return is a bus address. */ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) { unsigned int addr; outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); addr = inb(MCA_DMA_REG_EXE); addr |= inb(MCA_DMA_REG_EXE) << 8; addr |= inb(MCA_DMA_REG_EXE) << 16; return addr; } /** * mca_set_dma_count - load a 16bit transfer count * @dmanr: DMA channel * @count: count * * Set the DMA count for this channel. This can be up to 64Kbytes. * Setting a count of zero will not do what you expect. */ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) { count--; /* transfers one more than count -- correct for this */ outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN); outb(count & 0xff, MCA_DMA_REG_EXE); outb((count >> 8) & 0xff, MCA_DMA_REG_EXE); } /** * mca_get_dma_residue - get the remaining bytes to transfer * @dmanr: DMA channel * * This function returns the number of bytes left to transfer * on this DMA channel. */ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) { unsigned short count; outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN); count = 1 + inb(MCA_DMA_REG_EXE); count += inb(MCA_DMA_REG_EXE) << 8; return count; } /** * mca_set_dma_io - set the port for an I/O transfer * @dmanr: DMA channel * @io_addr: an I/O port number * * Unlike the ISA bus DMA controllers the DMA on MCA bus can transfer * with an I/O port target. */ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) { /* * DMA from a port address -- set the io address */ outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); outb(io_addr & 0xff, MCA_DMA_REG_EXE); outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); } /** * mca_set_dma_mode - set the DMA mode * @dmanr: DMA channel * @mode: mode to set * * The DMA controller supports several modes. The mode values you can * set are : * * %MCA_DMA_MODE_READ when reading from the DMA device. * * %MCA_DMA_MODE_WRITE to writing to the DMA device. * * %MCA_DMA_MODE_IO to do DMA to or from an I/O port. * * %MCA_DMA_MODE_16 to do 16bit transfers. 
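Taken together, the MCA DMA helpers above are used in the order mask, mode, address, count, unmask; see the sketch below. It is hedged and illustrative only: channel, bus address and length come from the caller, and as the header warns, a 16-bit transfer would need the count divided by 2 first.

/* Hedged sketch: programming one memory-to-device MCA DMA transfer with the helpers above. */
static void start_mca_read(unsigned int channel, unsigned int bus_addr, unsigned int len)
{
        mca_disable_dma(channel);                       /* mask the channel while reprogramming */
        mca_set_dma_mode(channel, MCA_DMA_MODE_READ);   /* read from memory, write to the device */
        mca_set_dma_addr(channel, bus_addr);            /* 24-bit bus address */
        mca_set_dma_count(channel, len);                /* up to 64K; never program zero */
        mca_enable_dma(channel);                        /* let the transfer run */
}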
* */ static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) { outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); outb(mode, MCA_DMA_REG_EXE); } #endif /* MCA_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mce.h000066400000000000000000000001711314037446600235040ustar00rootroot00000000000000#ifdef CONFIG_X86_MCE extern void mcheck_init(struct cpuinfo_x86 *c); #else #define mcheck_init(c) do {} while(0) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mman.h000066400000000000000000000011501314037446600236660ustar00rootroot00000000000000#ifndef __I386_MMAN_H__ #define __I386_MMAN_H__ #include #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ #endif /* __I386_MMAN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mmu.h000066400000000000000000000004721314037446600235420ustar00rootroot00000000000000#ifndef __i386_MMU_H #define __i386_MMU_H #include /* * The i386 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { int size; struct semaphore sem; void *ldt; void *vdso; } mm_context_t; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mmu_context.h000066400000000000000000000032601314037446600253040ustar00rootroot00000000000000#ifndef __I386_SCHED_H #define __I386_SCHED_H #include #include #include #include /* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP unsigned cpu = smp_processor_id(); if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; #endif } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { int cpu = smp_processor_id(); if (likely(prev != next)) { /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; per_cpu(cpu_tlbstate, cpu).active_mm = next; #endif cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables */ load_cr3(next->pgd); /* * load the LDT, if the LDT is different: */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context, cpu); } #ifdef CONFIG_SMP else { per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk, mm) \ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) #define activate_mm(prev, next) \ switch_mm((prev),(next),NULL) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mmx.h000066400000000000000000000004161314037446600235430ustar00rootroot00000000000000#ifndef _ASM_MMX_H #define _ASM_MMX_H /* * MMX 3Dnow! 
helper operations */ #include extern void *_mmx_memcpy(void *to, const void *from, size_t size); extern void mmx_clear_page(void *page); extern void mmx_copy_page(void *to, void *from); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mmzone.h000066400000000000000000000062651314037446600242570ustar00rootroot00000000000000/* * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 * */ #ifndef _ASM_MMZONE_H_ #define _ASM_MMZONE_H_ #include #ifdef CONFIG_NUMA extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) #ifdef CONFIG_X86_NUMAQ #include #elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ #include #endif extern int get_memcfg_numa_flat(void ); /* * This allows any one NUMA architecture to be compiled * for, and still fall back to the flat function if it * fails. */ static inline void get_memcfg_numa(void) { #ifdef CONFIG_X86_NUMAQ if (get_memcfg_numaq()) return; #elif defined(CONFIG_ACPI_SRAT) if (get_memcfg_from_srat()) return; #endif get_memcfg_numa_flat(); } extern int early_pfn_to_nid(unsigned long pfn); #else /* !CONFIG_NUMA */ #define get_memcfg_numa get_memcfg_numa_flat #define get_zholes_size(n) (0) #endif /* CONFIG_NUMA */ #ifdef CONFIG_DISCONTIGMEM /* * generic node memory support, the following assumptions apply: * * 1) memory comes in 256Mb contigious chunks which are either present or not * 2) we will not have more than 64Gb in total * * for now assume that 64Gb is max amount of RAM for whole system * 64Gb / 4096bytes/page = 16777216 pages */ #define MAX_NR_PAGES 16777216 #define MAX_ELEMENTS 256 #define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) extern s8 physnode_map[]; static inline int pfn_to_nid(unsigned long pfn) { #ifdef CONFIG_NUMA return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]); #else return 0; #endif } /* * Following are macros that each numa implmentation must define. */ #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ pg_data_t *__pgdat = NODE_DATA(nid); \ __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ }) /* XXX: FIXME -- wli */ #define kern_addr_valid(kaddr) (0) #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ #define pfn_valid(pfn) ((pfn) < num_physpages) #else static inline int pfn_valid(int pfn) { int nid = pfn_to_nid(pfn); if (nid >= 0) return (pfn < node_end_pfn(nid)); return 0; } #endif /* CONFIG_X86_NUMAQ */ #endif /* CONFIG_DISCONTIGMEM */ #ifdef CONFIG_NEED_MULTIPLE_NODES /* * Following are macros that are specific to this numa platform. 
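 *
 * (Illustrative note, not in the original header: each wrapper below simply
 * forwards the generic bootmem call to the first node, e.g. alloc_bootmem(x)
 * becomes __alloc_bootmem_node(NODE_DATA(0), x, ...), so all early
 * allocations are satisfied from node 0's data.)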
*/ #define reserve_bootmem(addr, size) \ reserve_bootmem_node(NODE_DATA(0), (addr), (size)) #define alloc_bootmem(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #define alloc_bootmem_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #endif /* CONFIG_NEED_MULTIPLE_NODES */ #endif /* _ASM_MMZONE_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/module.h000066400000000000000000000040151314037446600242260ustar00rootroot00000000000000#ifndef _ASM_I386_MODULE_H #define _ASM_I386_MODULE_H /* x86 is simple */ struct mod_arch_specific { }; #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define Elf_Ehdr Elf32_Ehdr #ifdef CONFIG_M386 #define MODULE_PROC_FAMILY "386 " #elif defined CONFIG_M486 #define MODULE_PROC_FAMILY "486 " #elif defined CONFIG_M586 #define MODULE_PROC_FAMILY "586 " #elif defined CONFIG_M586TSC #define MODULE_PROC_FAMILY "586TSC " #elif defined CONFIG_M586MMX #define MODULE_PROC_FAMILY "586MMX " #elif defined CONFIG_M686 #define MODULE_PROC_FAMILY "686 " #elif defined CONFIG_MPENTIUMII #define MODULE_PROC_FAMILY "PENTIUMII " #elif defined CONFIG_MPENTIUMIII #define MODULE_PROC_FAMILY "PENTIUMIII " #elif defined CONFIG_MPENTIUMM #define MODULE_PROC_FAMILY "PENTIUMM " #elif defined CONFIG_MPENTIUM4 #define MODULE_PROC_FAMILY "PENTIUM4 " #elif defined CONFIG_MK6 #define MODULE_PROC_FAMILY "K6 " #elif defined CONFIG_MK7 #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " #elif defined CONFIG_X86_ELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE #define MODULE_PROC_FAMILY "CRUSOE " #elif defined CONFIG_MEFFICEON #define MODULE_PROC_FAMILY "EFFICEON " #elif defined CONFIG_MWINCHIPC6 #define MODULE_PROC_FAMILY "WINCHIPC6 " #elif defined CONFIG_MWINCHIP2 #define MODULE_PROC_FAMILY "WINCHIP2 " #elif defined CONFIG_MWINCHIP3D #define MODULE_PROC_FAMILY "WINCHIP3D " #elif defined CONFIG_MCYRIXIII #define MODULE_PROC_FAMILY "CYRIXIII " #elif defined CONFIG_MVIAC3_2 #define MODULE_PROC_FAMILY "VIAC3-2 " #elif defined CONFIG_MGEODEGX1 #define MODULE_PROC_FAMILY "GEODEGX1 " #elif defined CONFIG_MGEODE_LX #define MODULE_PROC_FAMILY "GEODE " #else #error unknown processor family #endif #ifdef CONFIG_REGPARM #define MODULE_REGPARM "REGPARM " #else #define MODULE_REGPARM "" #endif #ifdef CONFIG_4KSTACKS #define MODULE_STACKSIZE "4KSTACKS " #else #define MODULE_STACKSIZE "" #endif #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE #endif /* _ASM_I386_MODULE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mpspec.h000066400000000000000000000056711314037446600242410ustar00rootroot00000000000000#ifndef __ASM_MPSPEC_H #define __ASM_MPSPEC_H #include #include #include extern int mp_bus_id_to_type [MAX_MP_BUSSES]; extern int mp_bus_id_to_node [MAX_MP_BUSSES]; extern int mp_bus_id_to_local [MAX_MP_BUSSES]; extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; extern int mp_bus_id_to_pci_bus 
[MAX_MP_BUSSES]; extern unsigned int def_to_bigsmp; extern unsigned int boot_cpu_physical_apicid; extern int smp_found_config; extern void find_smp_config (void); extern void get_smp_config (void); extern int nr_ioapics; extern int apic_version [MAX_APICS]; extern int mp_irq_entries; extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; extern int mpc_default_type; extern unsigned long mp_lapic_addr; extern int pic_mode; extern int using_apic_timer; #ifdef CONFIG_ACPI extern void mp_register_lapic (u8 id, u8 enabled); extern void mp_register_lapic_address (u64 address); extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs (void); extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); #endif /* CONFIG_ACPI */ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; }; typedef struct physid_mask physid_mask_t; #define physid_set(physid, map) set_bit(physid, (map).mask) #define physid_clear(physid, map) clear_bit(physid, (map).mask) #define physid_isset(physid, map) test_bit(physid, (map).mask) #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) #define physids_coerce(map) ((map).mask[0]) #define physids_promote(physids) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ __physid_mask.mask[0] = physids; \ __physid_mask; \ }) #define physid_mask_of_physid(physid) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ physid_set(physid, __physid_mask); \ __physid_mask; \ }) #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } extern physid_mask_t phys_cpu_present_map; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mpspec_def.h000066400000000000000000000106041314037446600250470ustar00rootroot00000000000000#ifndef __ASM_MPSPEC_DEF_H #define __ASM_MPSPEC_DEF_H /* * Structure definitions for SMP machines following the * Intel Multiprocessing Specification 1.1 and 1.4. */ /* * This tag identifies where the SMP configuration * information is. */ #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') #define MAX_MPC_ENTRY 1024 #define MAX_APICS 256 struct intel_mp_floating { char mpf_signature[4]; /* "_MP_" */ unsigned long mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ unsigned char mpf_specification;/* Specification version */ unsigned char mpf_checksum; /* Checksum (makes sum 0) */ unsigned char mpf_feature1; /* Standard or configuration ? 
*/ unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ unsigned char mpf_feature3; /* Unused (0) */ unsigned char mpf_feature4; /* Unused (0) */ unsigned char mpf_feature5; /* Unused (0) */ }; struct mp_config_table { char mpc_signature[4]; #define MPC_SIGNATURE "PCMP" unsigned short mpc_length; /* Size of table */ char mpc_spec; /* 0x01 */ char mpc_checksum; char mpc_oem[8]; char mpc_productid[12]; unsigned long mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; unsigned long mpc_lapic; /* APIC address */ unsigned long reserved; }; /* Followed by entries */ #define MP_PROCESSOR 0 #define MP_BUS 1 #define MP_IOAPIC 2 #define MP_INTSRC 3 #define MP_LINTSRC 4 #define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ struct mpc_config_processor { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ unsigned char mpc_cpuflag; #define CPU_ENABLED 1 /* Processor is available */ #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ unsigned long mpc_cpufeature; #define CPU_STEPPING_MASK 0x0F #define CPU_MODEL_MASK 0xF0 #define CPU_FAMILY_MASK 0xF00 unsigned long mpc_featureflag; /* CPUID feature value */ unsigned long mpc_reserved[2]; }; struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. */ #define BUSTYPE_EISA "EISA" #define BUSTYPE_ISA "ISA" #define BUSTYPE_INTERN "INTERN" /* Internal BUS */ #define BUSTYPE_MCA "MCA" #define BUSTYPE_VL "VL" /* Local bus */ #define BUSTYPE_PCI "PCI" #define BUSTYPE_PCMCIA "PCMCIA" #define BUSTYPE_CBUS "CBUS" #define BUSTYPE_CBUSII "CBUSII" #define BUSTYPE_FUTURE "FUTURE" #define BUSTYPE_MBI "MBI" #define BUSTYPE_MBII "MBII" #define BUSTYPE_MPI "MPI" #define BUSTYPE_MPSA "MPSA" #define BUSTYPE_NUBUS "NUBUS" #define BUSTYPE_TC "TC" #define BUSTYPE_VME "VME" #define BUSTYPE_XPRESS "XPRESS" #define BUSTYPE_NEC98 "NEC98" struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; unsigned char mpc_flags; #define MPC_APIC_USABLE 0x01 unsigned long mpc_apicaddr; }; struct mpc_config_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbus; unsigned char mpc_srcbusirq; unsigned char mpc_dstapic; unsigned char mpc_dstirq; }; enum mp_irq_source_types { mp_INT = 0, mp_NMI = 1, mp_SMI = 2, mp_ExtINT = 3 }; #define MP_IRQDIR_DEFAULT 0 #define MP_IRQDIR_HIGH 1 #define MP_IRQDIR_LOW 3 struct mpc_config_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbusid; unsigned char mpc_srcbusirq; unsigned char mpc_destapic; #define MP_APIC_ALL 0xFF unsigned char mpc_destapiclint; }; struct mp_config_oemtable { char oem_signature[4]; #define MPC_OEM_SIGNATURE "_OEM" unsigned short oem_length; /* Size of table */ char oem_rev; /* 0x01 */ char oem_checksum; char mpc_oem[8]; }; struct mpc_config_translation { unsigned char mpc_type; unsigned char trans_len; unsigned char trans_type; unsigned char trans_quad; unsigned char trans_global; unsigned char trans_local; unsigned short trans_reserved; }; /* * Default configurations * * 1 2 CPU ISA 82489DX * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining * 3 2 CPU EISA 82489DX * 4 2 CPU MCA 82489DX * 5 2 CPU ISA+PCI * 6 2 CPU EISA+PCI * 7 2 CPU MCA+PCI */ enum mp_bustype { MP_BUS_ISA = 1, MP_BUS_EISA, MP_BUS_PCI, 
MP_BUS_MCA, MP_BUS_NEC98 }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/msgbuf.h000066400000000000000000000017171314037446600242320ustar00rootroot00000000000000#ifndef _I386_MSGBUF_H #define _I386_MSGBUF_H /* * The msqid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct msqid64_ds { struct ipc64_perm msg_perm; __kernel_time_t msg_stime; /* last msgsnd time */ unsigned long __unused1; __kernel_time_t msg_rtime; /* last msgrcv time */ unsigned long __unused2; __kernel_time_t msg_ctime; /* last change time */ unsigned long __unused3; unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ __kernel_pid_t msg_lspid; /* pid of last msgsnd */ __kernel_pid_t msg_lrpid; /* last receive pid */ unsigned long __unused4; unsigned long __unused5; }; #endif /* _I386_MSGBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/msi.h000066400000000000000000000006341314037446600235340ustar00rootroot00000000000000/* * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ #ifndef ASM_MSI_H #define ASM_MSI_H #include #include #define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1) #define MSI_TARGET_CPU_SHIFT 12 extern struct msi_ops msi_apic_ops; static inline int msi_arch_init(void) { msi_register(&msi_apic_ops); return 0; } #endif /* ASM_MSI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/msr.h000066400000000000000000000204611314037446600235450ustar00rootroot00000000000000#ifndef __ASM_MSR_H #define __ASM_MSR_H /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ #define rdmsr(msr,val1,val2) \ __asm__ __volatile__("rdmsr" \ : "=a" (val1), "=d" (val2) \ : "c" (msr)) #define wrmsr(msr,val1,val2) \ __asm__ __volatile__("wrmsr" \ : /* no outputs */ \ : "c" (msr), "a" (val1), "d" (val2)) #define rdmsrl(msr,val) do { \ unsigned long l__,h__; \ rdmsr (msr, l__, h__); \ val = l__; \ val |= ((u64)h__<<32); \ } while(0) static inline void wrmsrl (unsigned long msr, unsigned long long val) { unsigned long lo, hi; lo = (unsigned long) val; hi = val >> 32; wrmsr (msr, lo, hi); } /* wrmsr with exception handling */ #define wrmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: wrmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 4\n\t" \ " .long 2b,3b\n\t" \ ".previous" \ : "=a" (ret__) \ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\ ret__; }) /* rdmsr with exception handling */ #define rdmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: rdmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 4\n\t" \ " .long 2b,3b\n\t" \ ".previous" \ : "=r" (ret__), "=a" (*(a)), "=d" (*(b)) \ : "c" (msr), "i" (-EFAULT));\ ret__; }) #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) #define rdtscl(low) \ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx") #define rdtscll(val) \ __asm__ __volatile__("rdtsc" : "=A" (val)) #define 
write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ : "c" (counter)) /* symbolic names for some interesting MSRs */ /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0 #define MSR_IA32_P5_MC_TYPE 1 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_EBL_CR_POWERON 0x2a #define MSR_IA32_APICBASE 0x1b #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) #define MSR_IA32_UCODE_WRITE 0x79 #define MSR_IA32_UCODE_REV 0x8b #define MSR_P6_PERFCTR0 0xc1 #define MSR_P6_PERFCTR1 0xc2 #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_IA32_MCG_CAP 0x179 #define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MCG_CTL 0x17b /* P4/Xeon+ specific */ #define MSR_IA32_MCG_EAX 0x180 #define MSR_IA32_MCG_EBX 0x181 #define MSR_IA32_MCG_ECX 0x182 #define MSR_IA32_MCG_EDX 0x183 #define MSR_IA32_MCG_ESI 0x184 #define MSR_IA32_MCG_EDI 0x185 #define MSR_IA32_MCG_EBP 0x186 #define MSR_IA32_MCG_ESP 0x187 #define MSR_IA32_MCG_EFLAGS 0x188 #define MSR_IA32_MCG_EIP 0x189 #define MSR_IA32_MCG_RESERVED 0x18A #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_PERF_CTL 0x199 #define MSR_IA32_THERM_CONTROL 0x19a #define MSR_IA32_THERM_INTERRUPT 0x19b #define MSR_IA32_THERM_STATUS 0x19c #define MSR_IA32_MISC_ENABLE 0x1a0 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 /* Pentium IV performance counter MSRs */ #define MSR_P4_BPU_PERFCTR0 0x300 #define MSR_P4_BPU_PERFCTR1 0x301 #define MSR_P4_BPU_PERFCTR2 0x302 #define MSR_P4_BPU_PERFCTR3 0x303 #define MSR_P4_MS_PERFCTR0 0x304 #define MSR_P4_MS_PERFCTR1 0x305 #define MSR_P4_MS_PERFCTR2 0x306 #define MSR_P4_MS_PERFCTR3 0x307 #define MSR_P4_FLAME_PERFCTR0 0x308 #define MSR_P4_FLAME_PERFCTR1 0x309 #define MSR_P4_FLAME_PERFCTR2 0x30a #define MSR_P4_FLAME_PERFCTR3 0x30b #define MSR_P4_IQ_PERFCTR0 0x30c #define MSR_P4_IQ_PERFCTR1 0x30d #define MSR_P4_IQ_PERFCTR2 0x30e #define MSR_P4_IQ_PERFCTR3 0x30f #define MSR_P4_IQ_PERFCTR4 0x310 #define MSR_P4_IQ_PERFCTR5 0x311 #define MSR_P4_BPU_CCCR0 0x360 #define MSR_P4_BPU_CCCR1 0x361 #define MSR_P4_BPU_CCCR2 0x362 #define MSR_P4_BPU_CCCR3 0x363 #define MSR_P4_MS_CCCR0 0x364 #define MSR_P4_MS_CCCR1 0x365 #define MSR_P4_MS_CCCR2 0x366 #define MSR_P4_MS_CCCR3 0x367 #define MSR_P4_FLAME_CCCR0 0x368 #define MSR_P4_FLAME_CCCR1 0x369 #define MSR_P4_FLAME_CCCR2 0x36a #define MSR_P4_FLAME_CCCR3 0x36b #define MSR_P4_IQ_CCCR0 0x36c #define MSR_P4_IQ_CCCR1 0x36d #define MSR_P4_IQ_CCCR2 0x36e #define MSR_P4_IQ_CCCR3 0x36f #define MSR_P4_IQ_CCCR4 0x370 #define MSR_P4_IQ_CCCR5 0x371 #define MSR_P4_ALF_ESCR0 0x3ca #define MSR_P4_ALF_ESCR1 0x3cb #define MSR_P4_BPU_ESCR0 0x3b2 #define MSR_P4_BPU_ESCR1 0x3b3 #define MSR_P4_BSU_ESCR0 0x3a0 #define MSR_P4_BSU_ESCR1 0x3a1 #define MSR_P4_CRU_ESCR0 0x3b8 #define MSR_P4_CRU_ESCR1 0x3b9 #define MSR_P4_CRU_ESCR2 0x3cc #define MSR_P4_CRU_ESCR3 0x3cd #define MSR_P4_CRU_ESCR4 0x3e0 #define MSR_P4_CRU_ESCR5 0x3e1 #define MSR_P4_DAC_ESCR0 0x3a8 #define MSR_P4_DAC_ESCR1 0x3a9 #define MSR_P4_FIRM_ESCR0 0x3a4 #define MSR_P4_FIRM_ESCR1 0x3a5 #define 
MSR_P4_FLAME_ESCR0 0x3a6 #define MSR_P4_FLAME_ESCR1 0x3a7 #define MSR_P4_FSB_ESCR0 0x3a2 #define MSR_P4_FSB_ESCR1 0x3a3 #define MSR_P4_IQ_ESCR0 0x3ba #define MSR_P4_IQ_ESCR1 0x3bb #define MSR_P4_IS_ESCR0 0x3b4 #define MSR_P4_IS_ESCR1 0x3b5 #define MSR_P4_ITLB_ESCR0 0x3b6 #define MSR_P4_ITLB_ESCR1 0x3b7 #define MSR_P4_IX_ESCR0 0x3c8 #define MSR_P4_IX_ESCR1 0x3c9 #define MSR_P4_MOB_ESCR0 0x3aa #define MSR_P4_MOB_ESCR1 0x3ab #define MSR_P4_MS_ESCR0 0x3c0 #define MSR_P4_MS_ESCR1 0x3c1 #define MSR_P4_PMH_ESCR0 0x3ac #define MSR_P4_PMH_ESCR1 0x3ad #define MSR_P4_RAT_ESCR0 0x3bc #define MSR_P4_RAT_ESCR1 0x3bd #define MSR_P4_SAAT_ESCR0 0x3ae #define MSR_P4_SAAT_ESCR1 0x3af #define MSR_P4_SSU_ESCR0 0x3be #define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ #define MSR_P4_TBPU_ESCR0 0x3c2 #define MSR_P4_TBPU_ESCR1 0x3c3 #define MSR_P4_TC_ESCR0 0x3c4 #define MSR_P4_TC_ESCR1 0x3c5 #define MSR_P4_U2L_ESCR0 0x3b0 #define MSR_P4_U2L_ESCR1 0x3b1 /* AMD Defined MSRs */ #define MSR_K6_EFER 0xC0000080 #define MSR_K6_STAR 0xC0000081 #define MSR_K6_WHCR 0xC0000082 #define MSR_K6_UWCCR 0xC0000085 #define MSR_K6_EPMR 0xC0000086 #define MSR_K6_PSOR 0xC0000087 #define MSR_K6_PFIR 0xC0000088 #define MSR_K7_EVNTSEL0 0xC0010000 #define MSR_K7_EVNTSEL1 0xC0010001 #define MSR_K7_EVNTSEL2 0xC0010002 #define MSR_K7_EVNTSEL3 0xC0010003 #define MSR_K7_PERFCTR0 0xC0010004 #define MSR_K7_PERFCTR1 0xC0010005 #define MSR_K7_PERFCTR2 0xC0010006 #define MSR_K7_PERFCTR3 0xC0010007 #define MSR_K7_HWCR 0xC0010015 #define MSR_K7_CLK_CTL 0xC001001b #define MSR_K7_FID_VID_CTL 0xC0010041 #define MSR_K7_FID_VID_STATUS 0xC0010042 /* extended feature register */ #define MSR_EFER 0xc0000080 /* EFER bits: */ /* Execute Disable enable */ #define _EFER_NX 11 #define EFER_NX (1<<_EFER_NX) /* Centaur-Hauls/IDT defined MSRs. */ #define MSR_IDT_FCR1 0x107 #define MSR_IDT_FCR2 0x108 #define MSR_IDT_FCR3 0x109 #define MSR_IDT_FCR4 0x10a #define MSR_IDT_MCR0 0x110 #define MSR_IDT_MCR1 0x111 #define MSR_IDT_MCR2 0x112 #define MSR_IDT_MCR3 0x113 #define MSR_IDT_MCR4 0x114 #define MSR_IDT_MCR5 0x115 #define MSR_IDT_MCR6 0x116 #define MSR_IDT_MCR7 0x117 #define MSR_IDT_MCR_CTRL 0x120 /* VIA Cyrix defined MSRs*/ #define MSR_VIA_FCR 0x1107 #define MSR_VIA_LONGHAUL 0x110a #define MSR_VIA_RNG 0x110b #define MSR_VIA_BCR2 0x1147 /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a #endif /* __ASM_MSR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mtrr.h000066400000000000000000000075511314037446600237350ustar00rootroot00000000000000/* Generic MTRR (Memory Type Range Register) ioctls. Copyright (C) 1997-1999 Richard Gooch This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
Richard Gooch may be reached by email at rgooch@atnf.csiro.au The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. */ #ifndef _LINUX_MTRR_H #define _LINUX_MTRR_H #include #include #define MTRR_IOCTL_BASE 'M' struct mtrr_sentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ }; struct mtrr_gentry { unsigned int regnum; /* Register number */ unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ }; /* These are the various ioctls */ #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) /* These are the region types */ #define MTRR_TYPE_UNCACHABLE 0 #define MTRR_TYPE_WRCOMB 1 /*#define MTRR_TYPE_ 2*/ /*#define MTRR_TYPE_ 3*/ #define MTRR_TYPE_WRTHROUGH 4 #define MTRR_TYPE_WRPROT 5 #define MTRR_TYPE_WRBACK 6 #define MTRR_NUM_TYPES 7 #ifdef __KERNEL__ /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR extern int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_del (int reg, unsigned long base, unsigned long size); extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); # else static __inline__ int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_del (int reg, unsigned long base, unsigned long size) { return -ENODEV; } static __inline__ int mtrr_del_page (int reg, unsigned long base, unsigned long size) { return -ENODEV; } static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) # endif #endif #endif /* _LINUX_MTRR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/mutex.h000066400000000000000000000101211314037446600240760ustar00rootroot00000000000000/* * Assembly implementation of the mutex fastpath, based on atomic * decrement/increment. * * started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H #include "asm/alternative.h" /** * __mutex_fastpath_lock - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t * @fn: function to call if the original value was not 1 * * Change the count from 1 to a value lower than 1, and call if it * wasn't 1 originally. 
This function MUST leave the value lower than 1 * even when the "1" assertion wasn't true. */ #define __mutex_fastpath_lock(count, fail_fn) \ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK_PREFIX " decl (%%eax) \n" \ " js 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ } while (0) /** * __mutex_fastpath_lock_retval - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t * @fail_fn: function to call if the original value was not 1 * * Change the count from 1 to a value lower than 1, and call if it * wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int fastcall (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); else return 0; } /** * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1 * @count: pointer of type atomic_t * @fail_fn: function to call if the original value was not 0 * * try to promote the mutex from 0 to 1. if it wasn't 0, call . * In the failure case, this function is allowed to either set the value * to 1, or to set it to a value lower than 1. * * If the implementation sets it to a value of lower than 1, the * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs * to return 0 otherwise. */ #define __mutex_fastpath_unlock(count, fail_fn) \ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK_PREFIX " incl (%%eax) \n" \ " jle 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 /** * __mutex_fastpath_trylock - try to acquire the mutex, without waiting * * @count: pointer of type atomic_t * @fail_fn: fallback function * * Change the count from 1 to a value lower than 1, and return 0 (failure) * if it wasn't 1 originally, or return 1 (success) otherwise. This function * MUST leave the value lower than 1 even when the "1" assertion wasn't true. * Additionally, if the value was < 0 originally, this function must not leave * it to 0 on failure. */ static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { /* * We have two variants here. The cmpxchg based one is the best one * because it never induce a false contention state. It is included * here because architectures using the inc/dec algorithms over the * xchg ones are much more likely to support cmpxchg natively. * * If not we fall back to the spinlock based variant - that is * just as efficient (and simpler) as a 'destructive' probing of * the mutex state would be. 
*/ #ifdef __HAVE_ARCH_CMPXCHG if (likely(atomic_cmpxchg(count, 1, 0) == 1)) return 1; return 0; #else return fail_fn(count); #endif } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/namei.h000066400000000000000000000005641314037446600240370ustar00rootroot00000000000000/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $ * linux/include/asm-i386/namei.h * * Included from linux/fs/namei.c */ #ifndef __I386_NAMEI_H #define __I386_NAMEI_H /* This dummy routine maybe changed to something useful * for /usr/gnemul/ emulation stuff. * Look at asm-sparc/namei.h for details. */ #define __emul_prefix() NULL #endif /* __I386_NAMEI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/nmi.h000066400000000000000000000015771314037446600235360ustar00rootroot00000000000000/* * linux/include/asm-i386/nmi.h */ #ifndef ASM_NMI_H #define ASM_NMI_H #include struct pt_regs; typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); /** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); /** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); extern void setup_apic_nmi_watchdog (void); extern int reserve_lapic_nmi(void); extern void release_lapic_nmi(void); extern void disable_timer_nmi_watchdog(void); extern void enable_timer_nmi_watchdog(void); extern void nmi_watchdog_tick (struct pt_regs * regs); extern unsigned int nmi_watchdog; #define NMI_DEFAULT -1 #define NMI_NONE 0 #define NMI_IO_APIC 1 #define NMI_LOCAL_APIC 2 #define NMI_INVALID 3 #endif /* ASM_NMI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/numa.h000066400000000000000000000000331314037446600236750ustar00rootroot00000000000000 int pxm_to_nid(int pxm); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/numaq.h000066400000000000000000000135521314037446600240700ustar00rootroot00000000000000/* * Written by: Patricia Gaughen, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to */ #ifndef NUMAQ_H #define NUMAQ_H #ifdef CONFIG_X86_NUMAQ extern int get_memcfg_numaq(void); /* * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the */ #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ /* * Communication area for each processor on lynxer-processor tests. * * NOTE: If you change the size of this eachproc structure you need * to change the definition for EACH_QUAD_SIZE. */ struct eachquadmem { unsigned int priv_mem_start; /* Starting address of this */ /* quad's private memory. */ /* This is always 0. */ /* In MB. */ unsigned int priv_mem_size; /* Size of this quad's */ /* private memory. */ /* In MB. 
*/ unsigned int low_shrd_mem_strp_start;/* Starting address of this */ /* quad's low shared block */ /* (untranslated). */ /* In MB. */ unsigned int low_shrd_mem_start; /* Starting address of this */ /* quad's low shared memory */ /* (untranslated). */ /* In MB. */ unsigned int low_shrd_mem_size; /* Size of this quad's low */ /* shared memory. */ /* In MB. */ unsigned int lmmio_copb_start; /* Starting address of this */ /* quad's local memory */ /* mapped I/O in the */ /* compatibility OPB. */ /* In MB. */ unsigned int lmmio_copb_size; /* Size of this quad's local */ /* memory mapped I/O in the */ /* compatibility OPB. */ /* In MB. */ unsigned int lmmio_nopb_start; /* Starting address of this */ /* quad's local memory */ /* mapped I/O in the */ /* non-compatibility OPB. */ /* In MB. */ unsigned int lmmio_nopb_size; /* Size of this quad's local */ /* memory mapped I/O in the */ /* non-compatibility OPB. */ /* In MB. */ unsigned int io_apic_0_start; /* Starting address of I/O */ /* APIC 0. */ unsigned int io_apic_0_sz; /* Size I/O APIC 0. */ unsigned int io_apic_1_start; /* Starting address of I/O */ /* APIC 1. */ unsigned int io_apic_1_sz; /* Size I/O APIC 1. */ unsigned int hi_shrd_mem_start; /* Starting address of this */ /* quad's high shared memory.*/ /* In MB. */ unsigned int hi_shrd_mem_size; /* Size of this quad's high */ /* shared memory. */ /* In MB. */ unsigned int mps_table_addr; /* Address of this quad's */ /* MPS tables from BIOS, */ /* in system space.*/ unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */ /* local access of MDC. */ unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */ /* remote access of MDC. */ unsigned int mm_port_io_start; /* Starting address of this */ /* quad's memory mapped Port */ /* I/O space. */ unsigned int mm_port_io_size; /* Size of this quad's memory*/ /* mapped Port I/O space. */ unsigned int mm_rmt_io_apic_start; /* Starting address of this */ /* quad's memory mapped */ /* remote I/O APIC space. */ unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/ /* mapped remote I/O APIC */ /* space. */ unsigned int mm_isa_start; /* Starting address of this */ /* quad's memory mapped ISA */ /* space (contains MDC */ /* memory space). */ unsigned int mm_isa_size; /* Size of this quad's memory*/ /* mapped ISA space (contains*/ /* MDC memory space). */ unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/ unsigned int lcl_qmi_addr; /* Local addr to access QMI. */ }; /* * Note: This structure must be NOT be changed unless the multiproc and * OS are changed to reflect the new structure. */ struct sys_cfg_data { unsigned int quad_id; unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */ unsigned int scd_version; /* Version number of this table. */ unsigned int first_quad_id; unsigned int quads_present31_0; /* 1 bit for each quad */ unsigned int quads_present63_32; /* 1 bit for each quad */ unsigned int config_flags; unsigned int boot_flags; unsigned int csr_start_addr; /* Absolute value (not in MB) */ unsigned int csr_size; /* Absolute value (not in MB) */ unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */ unsigned int lcl_apic_size; /* Absolute value (not in MB) */ unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ /* may not be totally populated */ unsigned int split_mem_enbl; /* 0 for no low shared memory */ unsigned int mmio_sz; /* Size of total system memory mapped I/O */ /* (in MB). 
*/ unsigned int quad_spin_lock; /* Spare location used for quad */ /* bringup. */ unsigned int nonzero55; /* For checksumming. */ unsigned int nonzeroaa; /* For checksumming. */ unsigned int scd_magic_number; unsigned int system_type; unsigned int checksum; /* * memory configuration area for each quad */ struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ }; static inline unsigned long *get_zholes_size(int nid) { return NULL; } #endif /* CONFIG_X86_NUMAQ */ #endif /* NUMAQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/page.h000066400000000000000000000103441314037446600236570ustar00rootroot00000000000000#ifndef _I386_PAGE_H #define _I386_PAGE_H /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #ifdef __KERNEL__ #ifndef __ASSEMBLY__ #ifdef CONFIG_X86_USE_3DNOW #include #define clear_page(page) mmx_clear_page((void *)(page)) #define copy_page(to,from) mmx_copy_page(to,from) #else /* * On older X86 processors it's not a win to use MMX here it seems. * Maybe the K6-III ? */ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) #endif #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * These are used to make use of C type-checking.. */ extern int nx_enabled; #ifdef CONFIG_X86_PAE extern unsigned long long __supported_pte_mask; typedef struct { unsigned long pte_low, pte_high; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgprot; } pgprot_t; #define pmd_val(x) ((x).pmd) #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) #define __pmd(x) ((pmd_t) { (x) } ) #define HPAGE_SHIFT 21 #else typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define boot_pte_t pte_t /* or would you rather have a typedef */ #define pte_val(x) ((x).pte_low) #define HPAGE_SHIFT 22 #endif #define PTE_MASK PAGE_MASK #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* * This handles the memory map.. We could make this a config * option, but too many people screw it up, and too few need * it. * * A __PAGE_OFFSET of 0xC0000000 means that the kernel has * a virtual address space of one gigabyte, which limits the * amount of physical memory you can use to about 950MB. * * If you want more physical memory than this then see the CONFIG_HIGHMEM4G * and CONFIG_HIGHMEM64G options in the kernel configuration. */ #ifndef __ASSEMBLY__ struct vm_area_struct; /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. 
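 *
 * (Illustrative note, not part of the original file: the direct lowmem
 * mapping, this vmalloc/iomap reserve and the fixmaps must all share the
 * kernel's virtual space above PAGE_OFFSET, which is why MAXMEM below is
 * computed as __FIXADDR_TOP - __PAGE_OFFSET - __VMALLOC_RESERVE.)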
*/ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern int page_is_ram(unsigned long pagenr); #endif /* __ASSEMBLY__ */ #ifdef __ASSEMBLY__ #define __PAGE_OFFSET CONFIG_PAGE_OFFSET #define __PHYSICAL_START CONFIG_PHYSICAL_START #else #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #endif #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) #define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (VM_READ | VM_WRITE | \ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include #include #define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #endif /* _I386_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/param.h000066400000000000000000000007141314037446600240430ustar00rootroot00000000000000#ifndef _ASMi386_PARAM_H #define _ASMi386_PARAM_H #ifdef __KERNEL__ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ #endif #ifndef HZ #define HZ 100 #endif #define EXEC_PAGESIZE 4096 #ifndef NOGROUP #define NOGROUP (-1) #endif #define MAXHOSTNAMELEN 64 /* max length of hostname */ #define COMMAND_LINE_SIZE 256 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/parport.h000066400000000000000000000007731314037446600244370ustar00rootroot00000000000000/* * parport.h: ia32-specific parport initialisation * * Copyright (C) 1999, 2000 Tim Waugh * * This file should only be included by drivers/parport/parport_pc.c. 
*/ #ifndef _ASM_I386_PARPORT_H #define _ASM_I386_PARPORT_H 1 static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) { return parport_pc_find_isa_ports (autoirq, autodma); } #endif /* !(_ASM_I386_PARPORT_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pci-direct.h000066400000000000000000000000431314037446600247610ustar00rootroot00000000000000#include "asm-x86_64/pci-direct.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pci.h000066400000000000000000000062131314037446600235160ustar00rootroot00000000000000#ifndef __i386_PCI_H #define __i386_PCI_H #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #define pcibios_scan_all_fns(a, b) 0 extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. */ #include #include #include #include #include struct pci_dev; /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. */ #define PCI_DMA_BUS_IS_PHYS (1) /* pci_unmap_{page,single} is a nop so... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) /* This is always fine. 
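On i386 the PCI bus address space is identical to the physical address space
 * (see PCI_DMA_BUS_IS_PHYS above), so DAC addressing can be reported as
 * supported unconditionally - an illustrative clarification, not part of the
 * original header.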
*/ #define pci_dac_dma_supported(pci_dev, mask) (1) static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return pfn_to_page(dma_addr >> PAGE_SHIFT); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #endif /* __KERNEL__ */ /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include /* generic pci stuff */ #include #endif /* __i386_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/percpu.h000066400000000000000000000001761314037446600242430ustar00rootroot00000000000000#ifndef __ARCH_I386_PERCPU__ #define __ARCH_I386_PERCPU__ #include #endif /* __ARCH_I386_PERCPU__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgalloc.h000066400000000000000000000023221314037446600243610ustar00rootroot00000000000000#ifndef _I386_PGALLOC_H #define _I386_PGALLOC_H #include #include #include /* for struct page */ #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) #define pmd_populate(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + \ ((unsigned long long)page_to_pfn(pte) << \ (unsigned long long) PAGE_SHIFT))) /* * Allocate and free page tables. */ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); static inline void pte_free_kernel(pte_t *pte) { free_page((unsigned long)pte); } static inline void pte_free(struct page *pte) { __free_page(pte); } #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. */ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(x) do { } while (0) #define __pmd_free_tlb(tlb,x) do { } while (0) #define pud_populate(mm, pmd, pte) BUG() #endif #define check_pgt_cache() do { } while (0) #endif /* _I386_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgtable-2level-defs.h000066400000000000000000000005641314037446600264720ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_DEFS_H #define _I386_PGTABLE_2LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 0 /* * traditional i386 two-level paging structure: */ #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 /* * the i386 is two-level, so we don't really have any * PMD directory physically. 
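 *
 * (Illustrative note, not part of the original header: a 32-bit virtual
 * address therefore splits as 10 PGD bits + 10 PTE bits + 12 offset bits,
 * which is where PTRS_PER_PGD == 1024, PTRS_PER_PTE == 1024 and the 4kB
 * page size come from.)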
*/ #define PTRS_PER_PTE 1024 #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgtable-2level.h000066400000000000000000000042201314037446600255440ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_H #define _I386_PGTABLE_2LEVEL_H #include #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. */ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!(x).pte_low) #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) /* * All present user pages are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte); } /* * All present pages are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return 1; } /* * Bits 0, 6 and 7 are taken, split up the 29 bits of offset * into this range: */ #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) \ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) #define pgoff_to_pte(off) \ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) void vmalloc_sync_all(void); #endif /* _I386_PGTABLE_2LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgtable-3level-defs.h000066400000000000000000000007331314037446600264710ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_DEFS_H #define _I386_PGTABLE_3LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 1 /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgtable-3level.h000066400000000000000000000111531314037446600255500ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_H #define _I386_PGTABLE_3LEVEL_H #include /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. 
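 * With PAE a 32-bit virtual address instead splits as 2 + 9 + 9 + 12 bits
 * (PGD/PMD/PTE/offset), and each entry widens to 64 bits so that physical
 * addresses above 4GB and the NX bit can be expressed (an illustrative
 * note, not part of the original file).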
* * Copyright (C) 1999 Ingo Molnar */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) #define pud_none(pud) 0 #define pud_bad(pud) 0 #define pud_present(pud) 1 /* * Is the pte executable? */ static inline int pte_x(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } /* * All present user-pages with !NX bit are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte) && pte_x(pte); } /* * All present pages with !NX bit are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return pte_x(pte); } /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben */ static inline void set_pte(pte_t *ptep, pte_t pte) { ptep->pte_high = pte.pte_high; smp_wmb(); ptep->pte_low = pte.pte_low; } #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define __HAVE_ARCH_SET_PTE_ATOMIC #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) #define set_pmd(pmdptr,pmdval) \ set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval)) #define set_pud(pudptr,pudval) \ (*(pudptr) = (pudval)) /* * Pentium-II erratum A13: in PAE mode we explicitly have to flush * the TLB via cr3 if the top-level pgd is changed... * We do not let the generic code free and clear pgd entries due to * this erratum. */ static inline void pud_clear (pud_t * pud) { } #define pud_page(pud) \ ((struct page *) __va(pud_val(pud) & PAGE_MASK)) #define pud_page_kernel(pud) \ ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) /* Find an entry in the second-level page table.. */ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ pmd_index(address)) /* * For PTEs and PDEs, we must clear the P-bit first when clearing a page table * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. 
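 *
 * (Illustrative rationale, not in the original source: the present bit lives
 * in the low 32-bit word, so writing that half first makes the entry
 * not-present before the high half changes; the MMU can therefore never walk
 * a half-updated entry that still looks present.)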
*/ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { ptep->pte_low = 0; smp_wmb(); ptep->pte_high = 0; } static inline void pmd_clear(pmd_t *pmd) { u32 *tmp = (u32 *)pmd; *tmp = 0; smp_wmb(); *(tmp + 1) = 0; } static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t res; /* xchg acts as a barrier before the setting of the high bits */ res.pte_low = xchg(&ptep->pte_low, 0); res.pte_high = ptep->pte_high; ptep->pte_high = 0; return res; } static inline int pte_same(pte_t a, pte_t b) { return a.pte_low == b.pte_low && a.pte_high == b.pte_high; } #define pte_page(x) pfn_to_page(pte_pfn(x)) static inline int pte_none(pte_t pte) { return !pte.pte_low && !pte.pte_high; } static inline unsigned long pte_pfn(pte_t pte) { return (pte.pte_low >> PAGE_SHIFT) | (pte.pte_high << (32 - PAGE_SHIFT)); } extern unsigned long long __supported_pte_mask; static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \ pgprot_val(pgprot)) & __supported_pte_mask); } /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) #define __pmd_free_tlb(tlb, x) do { } while (0) #define vmalloc_sync_all() ((void)0) #endif /* _I386_PGTABLE_3LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/pgtable.h000066400000000000000000000357111314037446600243660ustar00rootroot00000000000000#ifndef _I386_PGTABLE_H #define _I386_PGTABLE_H /* * The Linux memory management assumes a three-level page table setup. On * the i386, we use that, but "fold" the mid level into the top-level page * table, so that we physically have the same two-level page table as the * i386 mmu expects. * * This file contains the functions and defines necessary to modify and use * the i386 page table tree. */ #ifndef __ASSEMBLY__ #include #include #include #ifndef _I386_BITOPS_H #include #endif #include #include #include struct mm_struct; struct vm_area_struct; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; extern kmem_cache_t *pgd_cache; extern kmem_cache_t *pmd_cache; extern spinlock_t pgd_lock; extern struct page *pgd_list; void pmd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); /* * The Linux x86 paging architecture is 'compile-time dual-mode', it * implements both the traditional 2-level x86 page tables and the * newer 3-level PAE-mode page tables. 
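 *
 * (Illustrative example, not from the original source: with the values in
 *  pgtable-2level-defs.h and pgtable-3level-defs.h, a 32-bit virtual
 *  address is decoded as
 *      2-level: bits 31-22 pgd index (1024 entries), 21-12 pte index
 *               (1024 entries), 11-0 page offset;
 *      3-level: bits 31-30 pgd index (4 entries), 29-21 pmd index
 *               (512 entries), 20-12 pte index (512 entries), 11-0 offset.
 *  Both cover the same 4GB; PAE needs the extra pmd level because its
 *  page-table entries are 8 bytes wide, so one 4kB page holds only 512.)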
*/ #ifdef CONFIG_X86_PAE # include # define PMD_SIZE (1UL << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE-1)) #else # include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) #define TWOLEVEL_PGDIR_SHIFT 22 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) /* Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) #else # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif /* * _PAGE_PSE set in the page directory entry just means that * the page directory entry points directly to a 4MB-aligned block of * memory. */ #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_UNUSED1 0x200 /* available for programmer */ #define _PAGE_UNUSED2 0x400 #define _PAGE_UNUSED3 0x800 /* If _PAGE_PRESENT is clear, we use these: */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; pte_present gives true */ #ifdef CONFIG_X86_PAE #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) #else #define _PAGE_NX 0 #endif #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE \ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY \ PAGE_COPY_NOEXEC #define PAGE_READONLY \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define _PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define _PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) /* * The i386 can't do page protection for execute, and considers that * the same are read. Also, write permissions imply read permissions. * This is the closest we can get.. */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are * done without a 'access_ok(VERIFY_WRITE,..)' */ #undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/ static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } /* * The following only works if pte_present() is not true. */ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } #ifdef CONFIG_X86_PAE # include #else # include #endif static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); } static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); } static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) { pte_t pte; if (full) { pte = *ptep; pte_clear(mm, addr, ptep); } else { pte = ptep_get_and_clear(mm, addr, ptep); } return pte; } static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_RW, &ptep->pte_low); } /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * * dst - pointer to pgd range anwhere on a pgd page * src - "" * count - the number of pgds to copy. * * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { memcpy(dst, src, count * sizeof(pgd_t)); } /* * Macro to mark a page protection value as "uncacheable". On processors which do not support * it, this is a no-op. */ #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte.pte_low &= _PAGE_CHG_MASK; pte.pte_low |= pgprot_val(newprot); #ifdef CONFIG_X86_PAE /* * Chop off the NX bit (if present), and add the NX portion of * the newprot (if present): */ pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); pte.pte_high |= (pgprot_val(newprot) >> 32) & \ (__supported_pte_mask >> 32); #endif return pte; } #define pmd_large(pmd) \ ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * * this macro returns the index of the entry in the pgd page which would * control the given virtual address */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index_k(addr) pgd_index(addr) /* * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] * * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ #define pmd_index(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] * * this macro returns the index of the entry in the pte page which would * control the given virtual address */ #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) \ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page_kernel(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. */ extern pte_t *lookup_address(unsigned long address); /* * Make a given kernel text page executable/non-executable. * Returns the previous executability setting of that page (which * is used to restore the previous state). Used by the SMP bootup code. * NOTE: this is an __init function for security reasons. */ #ifdef CONFIG_X86_PAE extern int set_kernel_exec(unsigned long vaddr, int enable); #else static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} #endif extern void noexec_setup(const char *str); #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) #define pte_offset_map_nested(dir, address) \ ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #endif /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. 
* * Also, we only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. */ #define update_mmu_cache(vma,address,pte) do { } while (0) #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ if (__dirty) { \ (__ptep)->pte_low = (__entry).pte_low; \ flush_tlb_page(__vma, __address); \ } \ } while (0) #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME #include #endif /* _I386_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/poll.h000066400000000000000000000010351314037446600237060ustar00rootroot00000000000000#ifndef __i386_POLL_H #define __i386_POLL_H /* These are specified by iBCS2 */ #define POLLIN 0x0001 #define POLLPRI 0x0002 #define POLLOUT 0x0004 #define POLLERR 0x0008 #define POLLHUP 0x0010 #define POLLNVAL 0x0020 /* The rest seem to be more-or-less nonstandard. Check them! */ #define POLLRDNORM 0x0040 #define POLLRDBAND 0x0080 #define POLLWRNORM 0x0100 #define POLLWRBAND 0x0200 #define POLLMSG 0x0400 #define POLLREMOVE 0x1000 #define POLLRDHUP 0x2000 struct pollfd { int fd; short events; short revents; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/posix_types.h000066400000000000000000000045731314037446600253400ustar00rootroot00000000000000#ifndef __ARCH_I386_POSIX_TYPES_H #define __ARCH_I386_POSIX_TYPES_H /* * This file is generally used by user-level software, so you need to * be a little careful about namespace pollution etc. Also, we cannot * assume GCC is being used. 
*/ typedef unsigned long __kernel_ino_t; typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_nlink_t; typedef long __kernel_off_t; typedef int __kernel_pid_t; typedef unsigned short __kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; typedef unsigned int __kernel_size_t; typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; typedef long __kernel_time_t; typedef long __kernel_suseconds_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef int __kernel_daddr_t; typedef char * __kernel_caddr_t; typedef unsigned short __kernel_uid16_t; typedef unsigned short __kernel_gid16_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef unsigned short __kernel_old_uid_t; typedef unsigned short __kernel_old_gid_t; typedef unsigned short __kernel_old_dev_t; #ifdef __GNUC__ typedef long long __kernel_loff_t; #endif typedef struct { #if defined(__KERNEL__) || defined(__USE_ALL) int val[2]; #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ int __val[2]; #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ } __kernel_fsid_t; #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) #undef __FD_SET #define __FD_SET(fd,fdsetp) \ __asm__ __volatile__("btsl %1,%0": \ "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) #undef __FD_CLR #define __FD_CLR(fd,fdsetp) \ __asm__ __volatile__("btrl %1,%0": \ "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) #undef __FD_ISSET #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ unsigned char __result; \ __asm__ __volatile__("btl %1,%2 ; setb %0" \ :"=q" (__result) :"r" ((int) (fd)), \ "m" (*(__kernel_fd_set *) (fdsetp))); \ __result; })) #undef __FD_ZERO #define __FD_ZERO(fdsetp) \ do { \ int __d0, __d1; \ __asm__ __volatile__("cld ; rep ; stosl" \ :"=m" (*(__kernel_fd_set *) (fdsetp)), \ "=&c" (__d0), "=&D" (__d1) \ :"a" (0), "1" (__FDSET_LONGS), \ "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ } while (0) #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/processor.h000066400000000000000000000464651314037446600247770ustar00rootroot00000000000000/* * include/asm-i386/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_I386_PROCESSOR_H #define __ASM_I386_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include /* flag for disabling the tsc */ extern int tsc_disable; struct desc_struct { unsigned long a,b; }; #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. * Members of this structure are referenced in head.S, so think twice * before touching them. 
[mj] */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; char wp_works_ok; /* It doesn't on 386's */ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ char hard_math; char rfu; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ unsigned long x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB - valid for CPUS which support this call */ int x86_cache_alignment; /* In bytes */ char fdiv_bug; char f00f_bug; char coma_bug; char pad0; int x86_power; unsigned long loops_per_jiffy; #ifdef CONFIG_SMP cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif unsigned char x86_max_cores; /* cpuid returned max cores value */ unsigned char apicid; #ifdef CONFIG_SMP unsigned char booted_cores; /* number of cores as seen by OS */ __u8 phys_proc_id; /* Physical processor id. */ __u8 cpu_core_id; /* Core id */ #endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 #define X86_VENDOR_NUM 9 #define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; extern struct tss_struct doublefault_tss; DECLARE_PER_CPU(struct tss_struct, init_tss); #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); extern unsigned short num_cache_leaves; #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); #else static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Generic CPUID function * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. 
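 *
 * (Illustrative usage, not from the original header:
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid(1, &eax, &ebx, &ecx, &edx);
 *  leaf 1 returns the family/model/stepping word in eax and the feature
 *  flags in edx; the cpuid_eax()/cpuid_edx() helpers below return just
 *  one of those registers for a given leaf.)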
*/ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c"(0)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__("cpuid" : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__("cpuid" : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__("cpuid" : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__("cpuid" : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define load_cr3(pgdir) write_cr3(__pa(pgdir)) /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. 
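 *
 * (Illustrative example, not from the original source: CPU setup code
 *  might call, say, set_in_cr4(X86_CR4_PGE); that ORs the bit into
 *  mmu_cr4_features and writes it to %cr4, so CPUs brought up later can
 *  load mmu_cr4_features and end up with the same feature bits.)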
*/ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; write_cr4(cr4); } static inline void clear_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; write_cr4(cr4); } /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_PCR0 0x20 #define CX86_GCR 0xb8 #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_PCR1 0xf0 #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; /* Boot loader type from the setup header */ extern int bootloader_type; /* * User space process size: 3GB (default). */ #define TASK_SIZE (PAGE_OFFSET) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) #define HAVE_ARCH_PICK_MMAP_LAYOUT /* * Size of io_bitmap. 
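 *
 * (Worked numbers for illustration, not part of the original comment:
 *  65536 bits, one per x86 I/O port, is 8192 bytes, i.e. 2048 unsigned
 *  longs on i386 where sizeof(long) == 4.)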
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long mxcsr_mask; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; } __attribute__ ((aligned (16))); struct i387_soft_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ unsigned char ftop, changed, lookahead, no_update, rm, alimit; struct info *info; unsigned long entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; typedef struct { unsigned long seg; } mm_segment_t; struct thread_struct; struct tss_struct { unsigned short back_link,__blh; unsigned long esp0; unsigned short ss0,__ss0h; unsigned long esp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ unsigned long esp2; unsigned short ss2,__ss2h; unsigned long __cr3; unsigned long eip; unsigned long eflags; unsigned long eax,ecx,edx,ebx; unsigned long esp; unsigned long ebp; unsigned long esi; unsigned long edi; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; unsigned short ldt, __ldth; unsigned short trace, io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ unsigned long io_bitmap_max; struct thread_struct *io_bitmap_owner; /* * pads the TSS to be cacheline-aligned (size is 0x100) */ unsigned long __cacheline_filler[35]; /* * .. and then another 0x100 bytes for emergency kernel stack */ unsigned long stack[64]; } __attribute__((packed)); #define ARCH_MIN_TASKALIGN 16 struct thread_struct { /* cached TLS descriptors. */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; unsigned long esp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ unsigned long debugreg[8]; /* %%db0-7 debug registers */ /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387; /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; unsigned long v86flags, v86mask, saved_esp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* max allowed port in the bitmap, in bytes: */ unsigned long io_bitmap_max; }; #define INIT_THREAD { \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } /* * Note that the .io_bitmap member must be extra-big. 
This is because * the CPU will access an additional byte beyond the end of the IO * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ #define INIT_TSS { \ .esp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->ss1 != thread->sysenter_cs)) { tss->ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } } #define start_thread(regs, new_eip, new_esp) do { \ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ regs->xss = __USER_DS; \ regs->xcs = __USER_CS; \ regs->eip = new_eip; \ regs->esp = new_esp; \ } while (0) /* * These special macros can be used to get or set a debugging register */ #define get_debugreg(var, register) \ __asm__("movl %%db" #register ", %0" \ :"=r" (var)) #define set_debugreg(value, register) \ __asm__("movl %0,%%db" #register \ : /* no output */ \ :"r" (value)) /* * Set IOPL bits in EFLAGS from given mask */ static inline void set_iopl_mask(unsigned mask) { unsigned int reg; __asm__ __volatile__ ("pushfl;" "popl %0;" "andl %1, %0;" "orl %2, %0;" "pushl %0;" "popfl" : "=&r" (reg) : "i" (~X86_EFLAGS_IOPL), "r" (mask)); } /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) #define KSTK_TOP(info) \ ({ \ unsigned long *__ptr = (unsigned long *)(info); \ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ }) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. * This is necessary to guarantee that the entire "struct pt_regs" * is accessable even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). * Therefore beware: accessing the xss/esp fields of the * "struct pt_regs" is possible, but they may contain the * completely wrong values. 
*/ #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ __regs__ - 1; \ }) #define KSTK_EIP(task) (task_pt_regs(task)->eip) #define KSTK_ESP(task) (task_pt_regs(task)->esp) struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } #define cpu_relax() rep_nop() /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" #define GENERIC_NOP2 ".byte 0x89,0xf6\n" #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 /* Opteron nops */ #define K8_NOP1 GENERIC_NOP1 #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 /* K7 nops */ /* uses eax dependencies (arbitary choice) */ #define K7_NOP1 GENERIC_NOP1 #define K7_NOP2 ".byte 0x8b,0xc0\n" #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" #define K7_NOP5 K7_NOP4 ASM_NOP1 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" #define K7_NOP8 K7_NOP7 ASM_NOP1 #ifdef CONFIG_MK8 #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 #elif defined(CONFIG_MK7) #define ASM_NOP1 K7_NOP1 #define ASM_NOP2 K7_NOP2 #define ASM_NOP3 K7_NOP3 #define ASM_NOP4 K7_NOP4 #define ASM_NOP5 K7_NOP5 #define ASM_NOP6 K7_NOP6 #define ASM_NOP7 K7_NOP7 #define ASM_NOP8 K7_NOP8 #else #define ASM_NOP1 GENERIC_NOP1 #define ASM_NOP2 GENERIC_NOP2 #define ASM_NOP3 GENERIC_NOP3 #define ASM_NOP4 GENERIC_NOP4 #define ASM_NOP5 GENERIC_NOP5 #define ASM_NOP6 GENERIC_NOP6 #define ASM_NOP7 GENERIC_NOP7 #define ASM_NOP8 GENERIC_NOP8 #endif #define ASM_NOP_MAX 8 /* Prefetch instructions for Pentium III and AMD Athlon */ /* It's not worth to care about 3dnow! prefetches for the K6 because they are microcoded there and very slow. However we don't do prefetches for pre XP Athlons currently That should be fixed. */ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *x) { alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM, "r" (x)); } #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH /* 3dnow! prefetch to get an exclusive cache line. Useful for spinlocks to avoid one state transition in the cache coherency protocol. 
*/ static inline void prefetchw(const void *x) { alternative_input(ASM_NOP4, "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define spin_lock_prefetch(x) prefetchw(x) extern void select_idle_routine(const struct cpuinfo_x86 *c); #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); #endif /* __ASM_I386_PROCESSOR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ptrace.h000066400000000000000000000040431314037446600242200ustar00rootroot00000000000000#ifndef _I386_PTRACE_H #define _I386_PTRACE_H #define EBX 0 #define ECX 1 #define EDX 2 #define ESI 3 #define EDI 4 #define EBP 5 #define EAX 6 #define DS 7 #define ES 8 #define FS 9 #define GS 10 #define ORIG_EAX 11 #define EIP 12 #define CS 13 #define EFL 14 #define UESP 15 #define SS 16 #define FRAME_SIZE 17 /* this struct defines the way the registers are stored on the stack during a system call. */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 #define PTRACE_OLDSETOPTIONS 21 #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 #ifdef __KERNEL__ #include struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); /* * user_mode_vm(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check * if they have already ruled out V8086 mode, so user_mode(regs) can be used. */ static inline int user_mode(struct pt_regs *regs) { return (regs->xcs & 3) != 0; } static inline int user_mode_vm(struct pt_regs *regs) { return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0; } #define instruction_pointer(regs) ((regs)->eip) #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) extern unsigned long profile_pc(struct pt_regs *regs); #else #define profile_pc(regs) instruction_pointer(regs) #endif #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/resource.h000066400000000000000000000001351314037446600245670ustar00rootroot00000000000000#ifndef _I386_RESOURCE_H #define _I386_RESOURCE_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/rtc.h000066400000000000000000000002121314037446600235240ustar00rootroot00000000000000#ifndef _I386_RTC_H #define _I386_RTC_H /* * x86 uses the default access methods for the RTC. */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/rwlock.h000066400000000000000000000036651314037446600242540ustar00rootroot00000000000000/* include/asm-i386/rwlock.h * * Helpers used by both rw spinlocks and rw semaphores. * * Based in part on code from semaphore.h and * spinlock.h Copyright 1996 Linus Torvalds. * * Copyright 1999 Red Hat, Inc. * * Written by Benjamin LaHaise. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_I386_RWLOCK_H #define _ASM_I386_RWLOCK_H #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \ "jns 1f\n" \ "call " helper "\n\t" \ "1:\n" \ ::"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \ "jns 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ "call " helper "\n\t" \ "popl %%eax\n\t" \ "1:\n" \ :"+m" (*(volatile int *)rw) : : "memory") #define __build_read_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_read_lock_const(rw, helper); \ else \ __build_read_lock_ptr(rw, helper); \ } while (0) #define __build_write_lock_ptr(rw, helper) \ asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jz 1f\n" \ "call " helper "\n\t" \ "1:\n" \ ::"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jz 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ "call " helper "\n\t" \ "popl %%eax\n\t" \ "1:\n" \ :"+m" (*(volatile int *)rw) : : "memory") #define __build_write_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_write_lock_const(rw, helper); \ else \ __build_write_lock_ptr(rw, helper); \ } while (0) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/rwsem.h000066400000000000000000000176771314037446600241200ustar00rootroot00000000000000/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ * * Written by David Howells (dhowells@redhat.com). * * Derived from asm-i386/semaphore.h * * * The MSW of the count is the negated number of active writers and waiting * lockers, and the LSW is the total number of active locks * * The lock count is initialized to 0 (no active and no waiting lockers). * * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an * uncontended lock. This can be determined because XADD returns the old value. * Readers increment by 1 and see a positive value when uncontended, negative * if there are writers (and maybe) readers waiting (in which case it goes to * sleep). * * The value of WAITING_BIAS supports up to 32766 waiting processes. This can * be extended to 65534 by manually checking the whole MSW rather than relying * on the S flag. * * The value of ACTIVE_BIAS supports up to 65535 active processes. * * This should be totally fair - if anything is waiting, a process that wants a * lock will go to the back of the queue. When the currently active lock is * released, if there's a writer at the front of the queue, then that and only * that will be woken up; if there's a bunch of consequtive readers at the * front, then they'll all be woken up, but no other readers will be. 
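 *
 * (Worked example, for illustration only, not part of the original
 *  comment: starting from the unlocked value 0x00000000, an uncontended
 *  down_read adds ACTIVE_BIAS and sees 0x00000001 (positive, granted);
 *  an uncontended down_write adds ACTIVE_WRITE_BIAS = WAITING_BIAS +
 *  ACTIVE_BIAS = 0xffff0001 and sees the old value 0 (granted); a reader
 *  arriving after that makes the count 0xffff0002, which is negative, so
 *  it takes the slow path.)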
*/ #ifndef _I386_RWSEM_H #define _I386_RWSEM_H #ifndef _LINUX_RWSEM_H #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" #endif #ifdef __KERNEL__ #include #include #include struct rwsem_waiter; extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); /* * the semaphore definition */ struct rw_semaphore { signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif }; #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } #else # define __RWSEM_DEP_MAP_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) extern void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key); #define init_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ __init_rwsem((sem), #sem, &__key); \ } while (0) /* * lock for reading */ static inline void __down_read(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning down_read\n\t" LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ " js 2f\n\t" /* jump if we weren't granted the lock */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " pushl %%edx\n\t" " call rwsem_down_read_failed\n\t" " popl %%edx\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending down_read\n\t" : "+m" (sem->count) : "a" (sem) : "memory", "cc"); } /* * trylock for reading -- returns 1 if successful, 0 if contention */ static inline int __down_read_trylock(struct rw_semaphore *sem) { __s32 result, tmp; __asm__ __volatile__( "# beginning __down_read_trylock\n\t" " movl %0,%1\n\t" "1:\n\t" " movl %1,%2\n\t" " addl %3,%2\n\t" " jle 2f\n\t" LOCK_PREFIX " cmpxchgl %2,%0\n\t" " jnz 1b\n\t" "2:\n\t" "# ending __down_read_trylock\n\t" : "+m" (sem->count), "=&a" (result), "=&r" (tmp) : "i" (RWSEM_ACTIVE_READ_BIAS) : "memory", "cc"); return result>=0 ? 1 : 0; } /* * lock for writing */ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { int tmp; tmp = RWSEM_ACTIVE_WRITE_BIAS; __asm__ __volatile__( "# beginning down_write\n\t" LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ " testl %%edx,%%edx\n\t" /* was the count 0 before? 
*/ " jnz 2f\n\t" /* jump if we weren't granted the lock */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " call rwsem_down_write_failed\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending down_write" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) : "memory", "cc"); } static inline void __down_write(struct rw_semaphore *sem) { __down_write_nested(sem, 0); } /* * trylock for writing -- returns 1 if successful, 0 if contention */ static inline int __down_write_trylock(struct rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); if (ret == RWSEM_UNLOCKED_VALUE) return 1; return 0; } /* * unlock after reading */ static inline void __up_read(struct rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; __asm__ __volatile__( "# beginning __up_read\n\t" LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ " js 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " decw %%dx\n\t" /* do nothing if still outstanding active readers */ " jnz 1b\n\t" " pushl %%ecx\n\t" " call rwsem_wake\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __up_read\n" : "+m" (sem->count), "=d" (tmp) : "a" (sem), "1" (tmp) : "memory", "cc"); } /* * unlock after writing */ static inline void __up_write(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning __up_write\n\t" " movl %2,%%edx\n\t" LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ " jnz 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " decw %%dx\n\t" /* did the active count reduce to 0? */ " jnz 1b\n\t" /* jump back if not */ " pushl %%ecx\n\t" " call rwsem_wake\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __up_write\n" : "+m" (sem->count) : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) : "memory", "cc", "edx"); } /* * downgrade write lock to read lock */ static inline void __downgrade_write(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning __downgrade_write\n\t" LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ " js 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " pushl %%edx\n\t" " call rwsem_downgrade_wake\n\t" " popl %%edx\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __downgrade_write\n" : "+m" (sem->count) : "a" (sem), "i" (-RWSEM_WAITING_BIAS) : "memory", "cc"); } /* * implement atomic add functionality */ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) { __asm__ __volatile__( LOCK_PREFIX "addl %1,%0" : "+m" (sem->count) : "ir" (delta)); } /* * implement exchange and add functionality */ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) { int tmp = delta; __asm__ __volatile__( LOCK_PREFIX "xadd %0,%1" : "+r" (tmp), "+m" (sem->count) : : "memory"); return tmp+delta; } static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); } #endif /* __KERNEL__ */ #endif /* _I386_RWSEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/scatterlist.h000066400000000000000000000010671314037446600253060ustar00rootroot00000000000000#ifndef _I386_SCATTERLIST_H #define _I386_SCATTERLIST_H struct scatterlist { struct page *page; unsigned int offset; dma_addr_t dma_address; unsigned int length; }; /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of 
the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg * returns. */ #define sg_dma_address(sg) ((sg)->dma_address) #define sg_dma_len(sg) ((sg)->length) #define ISA_DMA_THRESHOLD (0x00ffffff) #endif /* !(_I386_SCATTERLIST_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/seccomp.h000066400000000000000000000005131314037446600243710ustar00rootroot00000000000000#ifndef _ASM_SECCOMP_H #include #ifdef TIF_32BIT #error "unexpected TIF_32BIT on i386" #endif #include #define __NR_seccomp_read __NR_read #define __NR_seccomp_write __NR_write #define __NR_seccomp_exit __NR_exit #define __NR_seccomp_sigreturn __NR_sigreturn #endif /* _ASM_SECCOMP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/sections.h000066400000000000000000000001761314037446600245740ustar00rootroot00000000000000#ifndef _I386_SECTIONS_H #define _I386_SECTIONS_H /* nothing to see, move along */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/segment.h000066400000000000000000000062471314037446600244140ustar00rootroot00000000000000#ifndef _ASM_SEGMENT_H #define _ASM_SEGMENT_H /* * The layout of the per-CPU GDT under Linux: * * 0 - null * 1 - reserved * 2 - reserved * 3 - reserved * * 4 - unused <==== new cacheline * 5 - unused * * ------- start of TLS (Thread-Local Storage) segments: * * 6 - TLS segment #1 [ glibc's TLS segment ] * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] * 8 - TLS segment #3 * 9 - reserved * 10 - reserved * 11 - reserved * * ------- start of kernel segments: * * 12 - kernel code segment <==== new cacheline * 13 - kernel data segment * 14 - default user CS * 15 - default user DS * 16 - TSS * 17 - LDT * 18 - PNPBIOS support (16->32 gate) * 19 - PNPBIOS support * 20 - PNPBIOS support * 21 - PNPBIOS support * 22 - PNPBIOS support * 23 - APM BIOS support * 24 - APM BIOS support * 25 - APM BIOS support * * 26 - ESPFIX small SS * 27 - unused * 28 - unused * 29 - unused * 30 - unused * 31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_ENTRIES 3 #define GDT_ENTRY_TLS_MIN 6 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #define GDT_ENTRY_DEFAULT_USER_CS 14 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) #define GDT_ENTRY_DEFAULT_USER_DS 15 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) #define GDT_ENTRY_KERNEL_BASE 12 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* * The GDT has 32 entries */ #define GDT_ENTRIES 32 #define GDT_SIZE (GDT_ENTRIES * 8) /* Simple and small GDT entries for booting only */ #define GDT_ENTRY_BOOT_CS 2 #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) /* The PnP BIOS entries in the GDT */ #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) #define GDT_ENTRY_PNPBIOS_TS1 
(GDT_ENTRY_PNPBIOS_BASE + 3) #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) /* The PnP BIOS selectors */ #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number * of tasks we can have.. */ #define IDT_ENTRIES 256 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/semaphore.h000066400000000000000000000120551314037446600247270ustar00rootroot00000000000000#ifndef _I386_SEMAPHORE_H #define _I386_SEMAPHORE_H #include #ifdef __KERNEL__ /* * SMP- and interrupt-safe semaphores.. * * (C) Copyright 1996 Linus Torvalds * * Modified 1996-12-23 by Dave Grothe to fix bugs in * the original code and to make semaphore waits * interruptible so that processes waiting on * semaphores can be killed. * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper * functions in asm/sempahore-helper.h while fixing a * potential and subtle race discovered by Ulrich Schmid * in down_interruptible(). Since I started to play here I * also implemented the `trylock' semaphore operation. * 1999-07-02 Artur Skawina * Optimized "0(ecx)" -> "(ecx)" (the assembler does not * do this). Changed calling sequences from push/jmp to * traditional call/ret. * Modified 2001-01-01 Andreas Franck * Some hacks to ensure compatibility with recent * GCC snapshots, to avoid stack corruption when compiling * with -fomit-frame-pointer. It's not sure if this will * be fixed in GCC, as our previous implementation was a * bit dubious. * * If you would like to see an analysis of this implementation, please * ftp to gcom.com and download the file * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. * */ #include #include #include #include struct semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; #define __SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) static inline void sema_init (struct semaphore *sem, int val) { /* * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. */ atomic_set(&sem->count, val); sem->sleepers = 0; init_waitqueue_head(&sem->wait); } static inline void init_MUTEX (struct semaphore *sem) { sema_init(sem, 1); } static inline void init_MUTEX_LOCKED (struct semaphore *sem) { sema_init(sem, 0); } fastcall void __down_failed(void /* special register calling convention */); fastcall int __down_failed_interruptible(void /* params in registers */); fastcall int __down_failed_trylock(void /* params in registers */); fastcall void __up_wakeup(void /* special register calling convention */); /* * This is ugly, but we want the default case to fall through. * "__down_failed" is a special asm handler that calls the C * routine that actually waits. 
See arch/i386/kernel/semaphore.c */ static inline void down(struct semaphore * sem) { might_sleep(); __asm__ __volatile__( "# atomic down operation\n\t" LOCK_PREFIX "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %0,%%eax\n\t" "call __down_failed\n\t" "jmp 1b\n" LOCK_SECTION_END :"+m" (sem->count) : :"memory","ax"); } /* * Interruptible try to acquire a semaphore. If we obtained * it, return zero. If we were interrupted, returns -EINTR */ static inline int down_interruptible(struct semaphore * sem) { int result; might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %1,%%eax\n\t" "call __down_failed_interruptible\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "+m" (sem->count) : :"memory"); return result; } /* * Non-blockingly attempt to down() a semaphore. * Returns zero if we acquired it */ static inline int down_trylock(struct semaphore * sem) { int result; __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK_PREFIX "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %1,%%eax\n\t" "call __down_failed_trylock\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "+m" (sem->count) : :"memory"); return result; } /* * Note! This is subtle. We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). * The default case (no contention) will result in NO * jumps for both down() and up(). */ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %0,%%eax\n\t" "call __up_wakeup\n\t" "jmp 1b\n" LOCK_SECTION_END ".subsection 0\n" :"+m" (sem->count) : :"memory","ax"); } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/sembuf.h000066400000000000000000000012711314037446600242230ustar00rootroot00000000000000#ifndef _I386_SEMBUF_H #define _I386_SEMBUF_H /* * The semid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ __kernel_time_t sem_otime; /* last semop time */ unsigned long __unused1; __kernel_time_t sem_ctime; /* last change time */ unsigned long __unused2; unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused3; unsigned long __unused4; }; #endif /* _I386_SEMBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/serial.h000066400000000000000000000017551314037446600242300ustar00rootroot00000000000000/* * include/asm-i386/serial.h */ /* * This assumes you have a 1.8432 MHz clock for your UART. * * It'd be nice if someone built a serial card with a 24.576 MHz * clock, since the 16550A is capable of handling a top speed of 1.5 * megabits/second; but this requires the faster clock. 
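 *
 * (Editorial note, not part of the original header: with the standard
 *  1.8432 MHz clock, the BASE_BAUD value defined below works out to
 *  1843200 / 16 = 115200, i.e. the familiar 115.2 kbaud ceiling of a stock
 *  16550A; the 24.576 MHz clock wished for above would raise it to
 *  24576000 / 16 = 1536000, the 1.5 megabit figure mentioned.)
 *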
*/ #define BASE_BAUD ( 1843200 / 16 ) /* Standard COM flags (except for COM4, because of the 8514 problem) */ #ifdef CONFIG_SERIAL_DETECT_IRQ #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) #else #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF #endif #define SERIAL_PORT_DFNS \ /* UART CLK PORT IRQ FLAGS */ \ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/setup.h000066400000000000000000000055211314037446600241040ustar00rootroot00000000000000/* * Just a place holder. We don't want to have to test x86 before * we include stuff */ #ifndef _i386_SETUP_H #define _i386_SETUP_H #ifdef __KERNEL__ #include /* * Reserved space for vmalloc and iomap - defined in asm/page.h */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) #endif #define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 256 #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F #define OLD_CL_BASE_ADDR 0x90000 #define OLD_CL_OFFSET 0x90022 #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #ifndef __ASSEMBLY__ /* * This is set up by the setup-routine at boot-time */ extern unsigned char boot_params[PARAM_SIZE]; #define PARAM (boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define IST_INFO (*(struct ist_info *) (PARAM+0x60)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) #define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0))) #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) #define EDID_INFO (*(struct edid_info *) (PARAM+0x140)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) /* * Do NOT EVER look at the BIOS memory size location. * It does not work on many machines. 
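 *
 * (Editorial note, not part of the original header: the LOWMEMSIZE()
 *  fallback below is 0x9f000 bytes, i.e. 636 KiB of conventional memory;
 *  the real memory layout is meant to come from the e820 map via the
 *  copy_e820_map()/sanitize_e820_map()/add_memory_region() helpers
 *  declared just below.)
 *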
*/ #define LOWMEMSIZE() (0x9f000) struct e820entry; char * __init machine_specific_memory_setup(void); int __init copy_e820_map(struct e820entry * biosmap, int nr_map); int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); void __init add_memory_region(unsigned long long start, unsigned long long size, int type); #endif /* __ASSEMBLY__ */ #endif /* _i386_SETUP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/shmbuf.h000066400000000000000000000022141314037446600242240ustar00rootroot00000000000000#ifndef _I386_SHMBUF_H #define _I386_SHMBUF_H /* * The shmid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ size_t shm_segsz; /* size of segment (bytes) */ __kernel_time_t shm_atime; /* last attach time */ unsigned long __unused1; __kernel_time_t shm_dtime; /* last detach time */ unsigned long __unused2; __kernel_time_t shm_ctime; /* last change time */ unsigned long __unused3; __kernel_pid_t shm_cpid; /* pid of creator */ __kernel_pid_t shm_lpid; /* pid of last operator */ unsigned long shm_nattch; /* no. of current attaches */ unsigned long __unused4; unsigned long __unused5; }; struct shminfo64 { unsigned long shmmax; unsigned long shmmin; unsigned long shmmni; unsigned long shmseg; unsigned long shmall; unsigned long __unused1; unsigned long __unused2; unsigned long __unused3; unsigned long __unused4; }; #endif /* _I386_SHMBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/shmparam.h000066400000000000000000000002331314037446600245470ustar00rootroot00000000000000#ifndef _ASMI386_SHMPARAM_H #define _ASMI386_SHMPARAM_H #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #endif /* _ASMI386_SHMPARAM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/sigcontext.h000066400000000000000000000037051314037446600251350ustar00rootroot00000000000000#ifndef _ASMi386_SIGCONTEXT_H #define _ASMi386_SIGCONTEXT_H #include /* * As documented in the iBCS2 standard.. * * The first part of "struct _fpstate" is just the normal i387 * hardware setup, the extra "status" word is used to save the * coprocessor status word before entering the handler. * * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 * * The FPU state data structure has had to grow to accommodate the * extended FPU state required by the Streaming SIMD Extensions. * There is no documented standard to accomplish this at the moment. 
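 *
 * (Editorial sketch, not part of the original header: the "magic" field of
 *  struct _fpstate below is what distinguishes the two layouts -- 0xffff
 *  means only the classic i387 area is valid, anything else means the FXSR
 *  fields (_fxsr_st[], _xmm[], mxcsr) also carry state -- so code poking at
 *  a saved context would typically do something like
 *
 *	struct _fpstate *fp = sc->fpstate;
 *	if (fp && fp->magic != 0xffff)
 *		... fp->_xmm[] and fp->mxcsr hold the SSE state ...
 *  )
 *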
*/ struct _fpreg { unsigned short significand[4]; unsigned short exponent; }; struct _fpxreg { unsigned short significand[4]; unsigned short exponent; unsigned short padding[3]; }; struct _xmmreg { unsigned long element[4]; }; struct _fpstate { /* Regular FPU environment */ unsigned long cw; unsigned long sw; unsigned long tag; unsigned long ipoff; unsigned long cssel; unsigned long dataoff; unsigned long datasel; struct _fpreg _st[8]; unsigned short status; unsigned short magic; /* 0xffff = regular FPU data only */ /* FXSR FPU environment */ unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ unsigned long mxcsr; unsigned long reserved; struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ struct _xmmreg _xmm[8]; unsigned long padding[56]; }; #define X86_FXSR_MAGIC 0x0000 struct sigcontext { unsigned short gs, __gsh; unsigned short fs, __fsh; unsigned short es, __esh; unsigned short ds, __dsh; unsigned long edi; unsigned long esi; unsigned long ebp; unsigned long esp; unsigned long ebx; unsigned long edx; unsigned long ecx; unsigned long eax; unsigned long trapno; unsigned long err; unsigned long eip; unsigned short cs, __csh; unsigned long eflags; unsigned long esp_at_signal; unsigned short ss, __ssh; struct _fpstate __user * fpstate; unsigned long oldmask; unsigned long cr2; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/siginfo.h000066400000000000000000000001321314037446600243730ustar00rootroot00000000000000#ifndef _I386_SIGINFO_H #define _I386_SIGINFO_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/signal.h000066400000000000000000000121001314037446600242100ustar00rootroot00000000000000#ifndef _ASMi386_SIGNAL_H #define _ASMi386_SIGNAL_H #include #include #include /* Avoid too many header ordering problems. */ struct siginfo; #ifdef __KERNEL__ #include /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ #define _NSIG 64 #define _NSIG_BPW 32 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) typedef unsigned long old_sigset_t; /* at least 32 bits */ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; #else /* Here we must cater to libcs that poke about in kernel headers. */ #define NSIG 32 typedef unsigned long sigset_t; #endif /* __KERNEL__ */ #define SIGHUP 1 #define SIGINT 2 #define SIGQUIT 3 #define SIGILL 4 #define SIGTRAP 5 #define SIGABRT 6 #define SIGIOT 6 #define SIGBUS 7 #define SIGFPE 8 #define SIGKILL 9 #define SIGUSR1 10 #define SIGSEGV 11 #define SIGUSR2 12 #define SIGPIPE 13 #define SIGALRM 14 #define SIGTERM 15 #define SIGSTKFLT 16 #define SIGCHLD 17 #define SIGCONT 18 #define SIGSTOP 19 #define SIGTSTP 20 #define SIGTTIN 21 #define SIGTTOU 22 #define SIGURG 23 #define SIGXCPU 24 #define SIGXFSZ 25 #define SIGVTALRM 26 #define SIGPROF 27 #define SIGWINCH 28 #define SIGIO 29 #define SIGPOLL SIGIO /* #define SIGLOST 29 */ #define SIGPWR 30 #define SIGSYS 31 #define SIGUNUSED 31 /* These should not be considered constants from userland. */ #define SIGRTMIN 32 #define SIGRTMAX _NSIG /* * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. * SA_NODEFER prevents the current signal from being masked in the handler. 
* * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. */ #define SA_NOCLDSTOP 0x00000001u #define SA_NOCLDWAIT 0x00000002u #define SA_SIGINFO 0x00000004u #define SA_ONSTACK 0x08000000u #define SA_RESTART 0x10000000u #define SA_NODEFER 0x40000000u #define SA_RESETHAND 0x80000000u #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND #define SA_RESTORER 0x04000000 /* * sigaltstack controls */ #define SS_ONSTACK 1 #define SS_DISABLE 2 #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 #include #ifdef __KERNEL__ struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; unsigned long sa_flags; __sigrestore_t sa_restorer; }; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; struct k_sigaction { struct sigaction sa; }; #else /* Here we must cater to libcs that poke about in kernel headers. */ struct sigaction { union { __sighandler_t _sa_handler; void (*_sa_sigaction)(int, struct siginfo *, void *); } _u; sigset_t sa_mask; unsigned long sa_flags; void (*sa_restorer)(void); }; #define sa_handler _u._sa_handler #define sa_sigaction _u._sa_sigaction #endif /* __KERNEL__ */ typedef struct sigaltstack { void __user *ss_sp; int ss_flags; size_t ss_size; } stack_t; #ifdef __KERNEL__ #include #define __HAVE_ARCH_SIG_BITOPS #define sigaddset(set,sig) \ (__builtin_constant_p(sig) ? \ __const_sigaddset((set),(sig)) : \ __gen_sigaddset((set),(sig))) static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) { __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } static __inline__ void __const_sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); } #define sigdelset(set,sig) \ (__builtin_constant_p(sig) ? \ __const_sigdelset((set),(sig)) : \ __gen_sigdelset((set),(sig))) static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) { __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } static __inline__ void __const_sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); } static __inline__ int __const_sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } static __inline__ int __gen_sigismember(sigset_t *set, int _sig) { int ret; __asm__("btl %2,%1\n\tsbbl %0,%0" : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); return ret; } #define sigismember(set,sig) \ (__builtin_constant_p(sig) ? 
\ __const_sigismember((set),(sig)) : \ __gen_sigismember((set),(sig))) static __inline__ int sigfindinword(unsigned long word) { __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); return word; } struct pt_regs; #define ptrace_signal_deliver(regs, cookie) \ do { \ if (current->ptrace & PT_DTRACE) { \ current->ptrace &= ~PT_DTRACE; \ (regs)->eflags &= ~TF_MASK; \ } \ } while (0) #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/smp.h000066400000000000000000000044451314037446600235470ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #endif #endif #define BAD_APICID 0xFFu #ifdef CONFIG_SMP #ifndef __ASSEMBLY__ /* * Private routines/data */ extern void smp_alloc_memory(void); extern int pic_mode; extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); #define MAX_APICID 256 extern u8 x86_cpu_to_apicid[]; #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); #endif /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ #define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_callout_map; extern cpumask_t cpu_callin_map; extern cpumask_t cpu_possible_map; /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { return cpus_weight(cpu_callout_map); } #ifdef CONFIG_X86_LOCAL_APIC #ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); #else #include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } #endif static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); #endif /* !__ASSEMBLY__ */ #else /* CONFIG_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/socket.h000066400000000000000000000022031314037446600242260ustar00rootroot00000000000000#ifndef _ASM_SOCKET_H #define _ASM_SOCKET_H #include /* For setsockopt(2) */ #define SOL_SOCKET 1 #define SO_DEBUG 1 #define SO_REUSEADDR 2 #define SO_TYPE 3 #define SO_ERROR 4 #define SO_DONTROUTE 5 #define SO_BROADCAST 6 #define SO_SNDBUF 7 #define SO_RCVBUF 8 #define SO_SNDBUFFORCE 32 #define SO_RCVBUFFORCE 33 #define SO_KEEPALIVE 9 #define SO_OOBINLINE 10 #define SO_NO_CHECK 11 #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 /* To add :#define SO_REUSEPORT 15 */ #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 #define SO_SNDLOWAT 19 #define SO_RCVTIMEO 20 #define SO_SNDTIMEO 21 /* Security levels - as per NRL IPv6 - don't actually do 
anything */ #define SO_SECURITY_AUTHENTICATION 22 #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 #define SO_SECURITY_ENCRYPTION_NETWORK 24 #define SO_BINDTODEVICE 25 /* Socket filtering */ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 #define SCM_TIMESTAMP SO_TIMESTAMP #define SO_ACCEPTCONN 30 #define SO_PEERSEC 31 #define SO_PASSSEC 34 #endif /* _ASM_SOCKET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/sockios.h000066400000000000000000000004251314037446600244140ustar00rootroot00000000000000#ifndef __ARCH_I386_SOCKIOS__ #define __ARCH_I386_SOCKIOS__ /* Socket-level I/O control calls. */ #define FIOSETOWN 0x8901 #define SIOCSPGRP 0x8902 #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 #define SIOCGSTAMP 0x8906 /* Get stamp */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/sparsemem.h000066400000000000000000000014221314037446600247340ustar00rootroot00000000000000#ifndef _I386_SPARSEMEM_H #define _I386_SPARSEMEM_H #ifdef CONFIG_SPARSEMEM /* * generic non-linear memory support: * * 1) we will not split memory into more chunks than will fit into the * flags field of the struct page */ /* * SECTION_SIZE_BITS 2^N: how big each section will be * MAX_PHYSADDR_BITS 2^N: how much physical address space we have * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ #ifdef CONFIG_X86_PAE #define SECTION_SIZE_BITS 30 #define MAX_PHYSADDR_BITS 36 #define MAX_PHYSMEM_BITS 36 #else #define SECTION_SIZE_BITS 26 #define MAX_PHYSADDR_BITS 32 #define MAX_PHYSMEM_BITS 32 #endif /* XXX: FIXME -- wli */ #define kern_addr_valid(kaddr) (0) #endif /* CONFIG_SPARSEMEM */ #endif /* _I386_SPARSEMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/spinlock.h000066400000000000000000000111071314037446600245630ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include #include #include #include /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * * We make no fairness assumptions. They have a cost. * * (the type definitions are in asm/spinlock_types.h) */ #define __raw_spin_is_locked(x) \ (*(volatile signed char *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ LOCK_PREFIX " ; decb %0\n\t" \ "jns 3f\n" \ "2:\t" \ "rep;nop\n\t" \ "cmpb $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ "3:\n\t" /* * NOTE: there's an irqs-on section here, which normally would have to be * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use * __raw_spin_lock_string_flags(). */ #define __raw_spin_lock_string_flags \ "\n1:\t" \ LOCK_PREFIX " ; decb %0\n\t" \ "jns 5f\n" \ "2:\t" \ "testl $0x200, %1\n\t" \ "jz 4f\n\t" \ "sti\n" \ "3:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jle 3b\n\t" \ "cli\n\t" \ "jmp 1b\n" \ "4:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jg 1b\n\t" \ "jmp 4b\n" \ "5:\n\t" static inline void __raw_spin_lock(raw_spinlock_t *lock) { asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory"); } /* * It is easier for the lock validator if interrupts are not re-enabled * in the middle of a lock-acquire. 
This is a performance feature anyway * so we turn it off: */ #ifndef CONFIG_PROVE_LOCKING static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory"); } #endif static inline int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval; __asm__ __volatile__( "xchgb %b0,%1" :"=q" (oldval), "+m" (lock->slock) :"0" (0) : "memory"); return oldval > 0; } /* * __raw_spin_unlock based on writing $1 to the low byte. * This method works. Despite all the confusion. * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) * (PPro errata 66, 92) */ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) #define __raw_spin_unlock_string \ "movb $1,%0" \ :"+m" (lock->slock) : : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_unlock_string ); } #else #define __raw_spin_unlock_string \ "xchgb %b0, %1" \ :"=q" (oldval), "+m" (lock->slock) \ :"0" (oldval) : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { char oldval = 1; __asm__ __volatile__( __raw_spin_unlock_string ); } #endif #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) /* * Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. * * The inline assembly is non-obvious. Think about it. * * Changed to use the same technique as rw semaphores. See * semaphore.h for details. -ben * * the helpers are in arch/i386/kernel/semaphore.c */ /** * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ #define __raw_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
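 *
 * (Editorial sketch, not part of the original header: these __raw_* macros
 *  sit underneath the generic rwlock API, so a non-blocking writer would
 *  normally be written against the wrappers, e.g.
 *
 *	if (write_trylock(&my_rwlock)) {
 *		... exclusive section ...
 *		write_unlock(&my_rwlock);
 *	}
 *  )
 *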
*/ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) static inline void __raw_read_lock(raw_rwlock_t *rw) { __build_read_lock(rw, "__read_lock_failed"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { __build_write_lock(rw, "__write_lock_failed"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); if (atomic_read(count) >= 0) return 1; atomic_inc(count); return 0; } static inline int __raw_write_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) return 1; atomic_add(RW_LOCK_BIAS, count); return 0; } static inline void __raw_read_unlock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" : "+m" (rw->lock) : : "memory"); } #endif /* __ASM_SPINLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/spinlock_types.h000066400000000000000000000005741314037446600260150ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_H # error "please don't include this file directly" #endif typedef struct { volatile unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/srat.h000066400000000000000000000023041314037446600237110ustar00rootroot00000000000000/* * Some of the code in this file has been gleaned from the 64 bit * discontigmem support code base. * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to Pat Gaughen */ #ifndef _ASM_SRAT_H_ #define _ASM_SRAT_H_ #ifndef CONFIG_ACPI_SRAT #error CONFIG_ACPI_SRAT not defined, and srat.h header has been included #endif extern int get_memcfg_from_srat(void); extern unsigned long *get_zholes_size(int); #endif /* _ASM_SRAT_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/stat.h000066400000000000000000000030701314037446600237140ustar00rootroot00000000000000#ifndef _I386_STAT_H #define _I386_STAT_H struct __old_kernel_stat { unsigned short st_dev; unsigned short st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned short st_rdev; unsigned long st_size; unsigned long st_atime; unsigned long st_mtime; unsigned long st_ctime; }; struct stat { unsigned long st_dev; unsigned long st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned long st_rdev; unsigned long st_size; unsigned long st_blksize; unsigned long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long __unused4; unsigned long __unused5; }; /* This matches struct stat64 in glibc2.1, hence the absolutely * insane amounts of padding around dev_t's. */ struct stat64 { unsigned long long st_dev; unsigned char __pad0[4]; #define STAT64_HAS_BROKEN_ST_INO 1 unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned char __pad3[4]; long long st_size; unsigned long st_blksize; unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned int st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; }; #define STAT_HAVE_NSEC 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/statfs.h000066400000000000000000000001271314037446600242450ustar00rootroot00000000000000#ifndef _I386_STATFS_H #define _I386_STATFS_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/string.h000066400000000000000000000256171314037446600242620ustar00rootroot00000000000000#ifndef _I386_STRING_H_ #define _I386_STRING_H_ #ifdef __KERNEL__ /* * On a 486 or Pentium, we are better off not using the * byte string operations. But on a 386 or a PPro the * byte string ops are faster than doing it by hand * (MUCH faster on a Pentium). */ /* * This string-include defines all string functions as inline * functions. Use gcc. It also assumes ds=es=data space, this should be * normal. Most of the string-functions are rather heavily hand-optimized, * see especially strsep,strstr,str[c]spn. They should work, but are not * very easy to understand. Everything is done entirely within the register * set, making the functions fast and clean. String instructions have been * used through-out, making for "slightly" unclear code :-) * * NO Copyright (C) 1991, 1992 Linus Torvalds, * consider these trivial functions to be PD. */ /* AK: in fact I bet it would be better to move this stuff all out of line. 
*/ #define __HAVE_ARCH_STRCPY static inline char * strcpy(char * dest,const char *src) { int d0, d1, d2; __asm__ __volatile__( "1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2) :"0" (src),"1" (dest) : "memory"); return dest; } #define __HAVE_ARCH_STRNCPY static inline char * strncpy(char * dest,const char *src,size_t count) { int d0, d1, d2, d3; __asm__ __volatile__( "1:\tdecl %2\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "rep\n\t" "stosb\n" "2:" : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) :"0" (src),"1" (dest),"2" (count) : "memory"); return dest; } #define __HAVE_ARCH_STRCAT static inline char * strcat(char * dest,const char * src) { int d0, d1, d2, d3; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "decl %1\n" "1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory"); return dest; } #define __HAVE_ARCH_STRNCAT static inline char * strncat(char * dest,const char * src,size_t count) { int d0, d1, d2, d3; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "decl %1\n\t" "movl %8,%3\n" "1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b\n" "2:\txorl %2,%2\n\t" "stosb" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) : "memory"); return dest; } #define __HAVE_ARCH_STRCMP static inline int strcmp(const char * cs,const char * ct) { int d0, d1; register int __res; __asm__ __volatile__( "1:\tlodsb\n\t" "scasb\n\t" "jne 2f\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "xorl %%eax,%%eax\n\t" "jmp 3f\n" "2:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "3:" :"=a" (__res), "=&S" (d0), "=&D" (d1) :"1" (cs),"2" (ct) :"memory"); return __res; } #define __HAVE_ARCH_STRNCMP static inline int strncmp(const char * cs,const char * ct,size_t count) { register int __res; int d0, d1, d2; __asm__ __volatile__( "1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "scasb\n\t" "jne 3f\n\t" "testb %%al,%%al\n\t" "jne 1b\n" "2:\txorl %%eax,%%eax\n\t" "jmp 4f\n" "3:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "4:" :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) :"1" (cs),"2" (ct),"3" (count) :"memory"); return __res; } #define __HAVE_ARCH_STRCHR static inline char * strchr(const char * s, int c) { int d0; register char * __res; __asm__ __volatile__( "movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "je 2f\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "movl $1,%1\n" "2:\tmovl %1,%0\n\t" "decl %0" :"=a" (__res), "=&S" (d0) :"1" (s),"0" (c) :"memory"); return __res; } #define __HAVE_ARCH_STRRCHR static inline char * strrchr(const char * s, int c) { int d0, d1; register char * __res; __asm__ __volatile__( "movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "jne 2f\n\t" "leal -1(%%esi),%0\n" "2:\ttestb %%al,%%al\n\t" "jne 1b" :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c) :"memory"); return __res; } #define __HAVE_ARCH_STRLEN static inline size_t strlen(const char * s) { int d0; register int __res; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "notl %0\n\t" "decl %0" :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu) :"memory"); return __res; } static __always_inline void * __memcpy(void * to, const void * from, size_t n) { int d0, d1, d2; __asm__ __volatile__( "rep ; movsl\n\t" "movl %4,%%ecx\n\t" "andl $3,%%ecx\n\t" #if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? 
*/ "jz 1f\n\t" #endif "rep ; movsb\n\t" "1:" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) : "memory"); return (to); } /* * This looks ugly, but the compiler can optimize it totally, * as the count is constant. */ static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) { long esi, edi; if (!n) return to; #if 1 /* want to do small copies with non-string ops? */ switch (n) { case 1: *(char*)to = *(char*)from; return to; case 2: *(short*)to = *(short*)from; return to; case 4: *(int*)to = *(int*)from; return to; #if 1 /* including those doable with two moves? */ case 3: *(short*)to = *(short*)from; *((char*)to+2) = *((char*)from+2); return to; case 5: *(int*)to = *(int*)from; *((char*)to+4) = *((char*)from+4); return to; case 6: *(int*)to = *(int*)from; *((short*)to+2) = *((short*)from+2); return to; case 8: *(int*)to = *(int*)from; *((int*)to+1) = *((int*)from+1); return to; #endif } #endif esi = (long) from; edi = (long) to; if (n >= 5*4) { /* large block: use rep prefix */ int ecx; __asm__ __volatile__( "rep ; movsl" : "=&c" (ecx), "=&D" (edi), "=&S" (esi) : "0" (n/4), "1" (edi),"2" (esi) : "memory" ); } else { /* small block: don't clobber ecx + smaller code */ if (n >= 4*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 3*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 2*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 1*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); } switch (n % 4) { /* tail */ case 0: return to; case 1: __asm__ __volatile__("movsb" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; case 2: __asm__ __volatile__("movsw" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; default: __asm__ __volatile__("movsw\n\tmovsb" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; } } #define __HAVE_ARCH_MEMCPY #ifdef CONFIG_X86_USE_3DNOW #include /* * This CPU favours 3DNow strongly (eg AMD Athlon) */ static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) { if (len < 512) return __constant_memcpy(to, from, len); return _mmx_memcpy(to, from, len); } static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) { if (len < 512) return __memcpy(to, from, len); return _mmx_memcpy(to, from, len); } #define memcpy(t, f, n) \ (__builtin_constant_p(n) ? \ __constant_memcpy3d((t),(f),(n)) : \ __memcpy3d((t),(f),(n))) #else /* * No 3D Now! */ #define memcpy(t, f, n) \ (__builtin_constant_p(n) ? 
\ __constant_memcpy((t),(f),(n)) : \ __memcpy((t),(f),(n))) #endif #define __HAVE_ARCH_MEMMOVE void *memmove(void * dest,const void * src, size_t n); #define memcmp __builtin_memcmp #define __HAVE_ARCH_MEMCHR static inline void * memchr(const void * cs,int c,size_t count) { int d0; register void * __res; if (!count) return NULL; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" :"=D" (__res), "=&c" (d0) :"a" (c),"0" (cs),"1" (count) :"memory"); return __res; } static inline void * __memset_generic(void * s, char c,size_t count) { int d0, d1; __asm__ __volatile__( "rep\n\t" "stosb" : "=&c" (d0), "=&D" (d1) :"a" (c),"1" (s),"0" (count) :"memory"); return s; } /* we might want to write optimized versions of these later */ #define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) /* * memset(x,0,y) is a reasonably common thing to do, so we want to fill * things 32 bits at a time even when we don't know the size of the * area at compile-time.. */ static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) { int d0, d1; __asm__ __volatile__( "rep ; stosl\n\t" "testb $2,%b3\n\t" "je 1f\n\t" "stosw\n" "1:\ttestb $1,%b3\n\t" "je 2f\n\t" "stosb\n" "2:" :"=&c" (d0), "=&D" (d1) :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) :"memory"); return (s); } /* Added by Gertjan van Wingerde to make minix and sysv module work */ #define __HAVE_ARCH_STRNLEN static inline size_t strnlen(const char * s, size_t count) { int d0; register int __res; __asm__ __volatile__( "movl %2,%0\n\t" "jmp 2f\n" "1:\tcmpb $0,(%0)\n\t" "je 3f\n\t" "incl %0\n" "2:\tdecl %1\n\t" "cmpl $-1,%1\n\t" "jne 1b\n" "3:\tsubl %2,%0" :"=a" (__res), "=&d" (d0) :"c" (s),"1" (count) :"memory"); return __res; } /* end of additional stuff */ #define __HAVE_ARCH_STRSTR extern char *strstr(const char *cs, const char *ct); /* * This looks horribly ugly, but the compiler can optimize it totally, * as we by now know that both pattern and count is constant.. */ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) { switch (count) { case 0: return s; case 1: *(unsigned char *)s = pattern; return s; case 2: *(unsigned short *)s = pattern; return s; case 3: *(unsigned short *)s = pattern; *(2+(unsigned char *)s) = pattern; return s; case 4: *(unsigned long *)s = pattern; return s; } #define COMMON(x) \ __asm__ __volatile__( \ "rep ; stosl" \ x \ : "=&c" (d0), "=&D" (d1) \ : "a" (pattern),"0" (count/4),"1" ((long) s) \ : "memory") { int d0, d1; switch (count % 4) { case 0: COMMON(""); return s; case 1: COMMON("\n\tstosb"); return s; case 2: COMMON("\n\tstosw"); return s; default: COMMON("\n\tstosw\n\tstosb"); return s; } } #undef COMMON } #define __constant_c_x_memset(s, c, count) \ (__builtin_constant_p(count) ? \ __constant_c_and_count_memset((s),(c),(count)) : \ __constant_c_memset((s),(c),(count))) #define __memset(s, c, count) \ (__builtin_constant_p(count) ? \ __constant_count_memset((s),(c),(count)) : \ __memset_generic((s),(c),(count))) #define __HAVE_ARCH_MEMSET #define memset(s, c, count) \ (__builtin_constant_p(c) ? 
\ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ __memset((s),(c),(count))) /* * find the first occurrence of byte 'c', or 1 past the area if none */ #define __HAVE_ARCH_MEMSCAN static inline void * memscan(void * addr, int c, size_t size) { if (!size) return addr; __asm__("repnz; scasb\n\t" "jnz 1f\n\t" "dec %%edi\n" "1:" : "=D" (addr), "=c" (size) : "0" (addr), "1" (size), "a" (c) : "memory"); return addr; } #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/suspend.h000066400000000000000000000030631314037446600244240ustar00rootroot00000000000000/* * Copyright 2001-2002 Pavel Machek * Based on code * Copyright 2001 Patrick Mochel */ #include #include static inline int arch_prepare_suspend(void) { /* If you want to make non-PSE machine work, turn off paging in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so it could work... */ if (!cpu_has_pse) { printk(KERN_ERR "PSE is required for swsusp.\n"); return -EPERM; } return 0; } /* image of the saved processor state */ struct saved_context { u16 es, fs, gs, ss; unsigned long cr0, cr2, cr3, cr4; u16 gdt_pad; u16 gdt_limit; unsigned long gdt_base; u16 idt_pad; u16 idt_limit; unsigned long idt_base; u16 ldt; u16 tss; unsigned long tr; unsigned long safety; unsigned long return_address; } __attribute__((packed)); #ifdef CONFIG_ACPI_SLEEP extern unsigned long saved_eip; extern unsigned long saved_esp; extern unsigned long saved_ebp; extern unsigned long saved_ebx; extern unsigned long saved_esi; extern unsigned long saved_edi; static inline void acpi_save_register_state(unsigned long return_point) { saved_eip = return_point; asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); } #define acpi_restore_register_state() do {} while (0) /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/system.h000066400000000000000000000320711314037446600242700ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #include /* for LOCK_PREFIX */ #ifdef __KERNEL__ struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. 
*/ #define switch_to(prev,next,last) do { \ unsigned long esi,edi; \ asm volatile("pushfl\n\t" /* Save flags */ \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ "movl %5,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ "pushl %6\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popfl" \ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ "=a" (last),"=S" (esi),"=D" (edi) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ "2" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %%dl,%2\n\t" \ "movb %%dh,%3" \ :"=&d" (__pr) \ :"m" (*((addr)+2)), \ "m" (*((addr)+4)), \ "m" (*((addr)+7)), \ "0" (base) \ ); } while(0) #define _set_limit(addr,limit) do { unsigned long __lr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %2,%%dh\n\t" \ "andb $0xf0,%%dh\n\t" \ "orb %%dh,%%dl\n\t" \ "movb %%dl,%2" \ :"=&d" (__lr) \ :"m" (*(addr)), \ "m" (*((addr)+6)), \ "0" (limit) \ ); } while(0) #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "mov %0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "pushl $0\n\t" \ "popl %%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 4\n\t" \ ".long 1b,3b\n" \ ".previous" \ : :"rm" (value)) /* * Save a segment register away */ #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) #define read_cr0() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr0,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr0(x) \ __asm__ __volatile__("movl %0,%%cr0": :"r" (x)) #define read_cr2() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr2,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr2(x) \ __asm__ __volatile__("movl %0,%%cr2": :"r" (x)) #define read_cr3() ({ \ unsigned int __dummy; \ __asm__ ( \ "movl %%cr3,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr3(x) \ __asm__ __volatile__("movl %0,%%cr3": :"r" (x)) #define read_cr4() ({ \ unsigned int __dummy; \ __asm__( \ "movl %%cr4,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define read_cr4_safe() ({ \ unsigned int __dummy; \ /* This could fault if %cr4 does not exist */ \ __asm__("1: movl %%cr4, %0 \n" \ "2: \n" \ ".section __ex_table,\"a\" \n" \ ".long 1b,2b \n" \ ".previous \n" \ : "=r" (__dummy): "0" (0)); \ __dummy; \ }) #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) /* * Clear and set 'TS' bit respectively */ #define clts() __asm__ __volatile__ ("clts") #define stts() write_cr0(8 | read_cr0()) #endif /* __KERNEL__ */ #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory") static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; __asm__("lsll %1,%0" :"=r" (__limit):"r" (segment)); return __limit+1; } #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) #ifdef CONFIG_X86_CMPXCHG64 /* * The semantics of XCHGCMP8B are a bit strange, this is why * there is a loop and the loading of %%eax and %%edx has to * be inside. 
This inlines well in most cases, the cached * cost is around ~38 cycles. (in the future we might want * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that * might have an implicit FPU-save as a cost, so it's not * clear which path to go.) * * cmpxchg8b must be used with the lock prefix here to allow * the instruction to be executed atomically, see page 3-102 * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. */ static inline void __set_64bit (unsigned long long * ptr, unsigned int low, unsigned int high) { __asm__ __volatile__ ( "\n1:\t" "movl (%0), %%eax\n\t" "movl 4(%0), %%edx\n\t" "lock cmpxchg8b (%0)\n\t" "jnz 1b" : /* no outputs */ : "D"(ptr), "b"(low), "c"(high) : "ax","dx","memory"); } static inline void __set_64bit_constant (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); } #define ll_low(x) *(((unsigned int*)&(x))+0) #define ll_high(x) *(((unsigned int*)&(x))+1) static inline void __set_64bit_var (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,ll_low(value), ll_high(value)); } #define set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit_constant(ptr, value) : \ __set_64bit_var(ptr, value) ) #define _set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ __set_64bit(ptr, ll_low(value), ll_high(value)) ) #endif /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to * simulate the cmpxchg on the 80386 CPU. For that purpose we define * a function for each of the sizes we support. 
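 *
 * (Editorial sketch, not part of the original header: whichever path is
 *  taken, cmpxchg() keeps the usual compare-and-swap contract -- success is
 *  detected by comparing the returned value with the expected one -- so a
 *  lock-free update is typically written as a retry loop:
 *
 *	unsigned long old, new;
 *	do {
 *		old = *counter;
 *		new = old + 1;
 *	} while (cmpxchg(counter, old, new) != old);
 *  )
 *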
*/ extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, unsigned long new, int size) { switch (size) { case 1: return cmpxchg_386_u8(ptr, old, new); case 2: return cmpxchg_386_u16(ptr, old, new); case 4: return cmpxchg_386_u32(ptr, old, new); } return old; } #define cmpxchg(ptr,o,n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ __ret = __cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ else \ __ret = cmpxchg_386((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ __ret; \ }) #endif #ifdef CONFIG_X86_CMPXCHG64 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, unsigned long long new) { unsigned long long prev; __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" : "=A"(prev) : "b"((unsigned long)new), "c"((unsigned long)(new >> 32)), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } #define cmpxchg64(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ (unsigned long long)(n))) #endif /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. * * For now, "wmb()" doesn't actually do anything, as all * Intel CPU's follow what Intel calls a *Processor Order*, * in which all writes are seen in the program order even * outside the CPU. * * I expect future Intel CPU's to have a weaker ordering, * but I'd also expect them to finally get their act together * and add some real memory barriers if so. * * Some non intel clones support out of order store. wmb() ceases to be a * nop for these. */ /* * Actually only lfence would be needed for mb() because all stores done * by the kernel should be already ordered. But keep a full barrier for now. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) /** * read_barrier_depends - Flush all pending reads that subsequents reads * depend on. * * No data-dependent reads from memory-like regions are ever reordered * over this barrier. All reads preceding this primitive are guaranteed * to access memory (but not necessarily other CPUs' caches) before any * reads following this primitive that depend on the data return by * any of the preceding reads. This primitive is much lighter weight than * rmb() on most CPUs, and is never heavier weight than is * rmb(). * * These ordering constraints are respected by both the local CPU * and the compiler. * * Ordering is not guaranteed by anything other than these primitives, * not even by data dependencies. See the documentation for * memory_barrier() for examples and URLs to more information. * * For example, the following code would force ordering (the initial * value of "a" is zero, "b" is one, and "p" is "&a"): * * * CPU 0 CPU 1 * * b = 2; * memory_barrier(); * p = &b; q = p; * read_barrier_depends(); * d = *q; * * * because the read of "*q" depends on the read of "p" and these * two reads are separated by a read_barrier_depends(). However, * the following code, with the same initial values for "a" and "b": * * * CPU 0 CPU 1 * * a = 2; * memory_barrier(); * b = 3; y = b; * read_barrier_depends(); * x = a; * * * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". 
Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() * in cases like this where there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) #ifdef CONFIG_X86_OOSTORE /* Actually there are no OOO store capable CPUs for now that do SSE, but make it already an possibility. */ #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) #else #define wmb() __asm__ __volatile__ ("": : :"memory") #endif #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_read_barrier_depends() read_barrier_depends() #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif #include /* * disable hlt during certain critical i/o operations */ #define HAVE_DISABLE_HLT void disable_hlt(void); void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible: */ static inline void sched_cacheflush(void) { wbinvd(); } extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/termbits.h000066400000000000000000000073511314037446600246000ustar00rootroot00000000000000#ifndef __ARCH_I386_TERMBITS_H__ #define __ARCH_I386_TERMBITS_H__ #include typedef unsigned char cc_t; typedef unsigned int speed_t; typedef unsigned int tcflag_t; #define NCCS 19 struct termios { tcflag_t c_iflag; /* input mode flags */ tcflag_t c_oflag; /* output mode flags */ tcflag_t c_cflag; /* control mode flags */ tcflag_t c_lflag; /* local mode flags */ cc_t c_line; /* line discipline */ cc_t c_cc[NCCS]; /* control characters */ }; /* c_cc characters */ #define VINTR 0 #define VQUIT 1 #define VERASE 2 #define VKILL 3 #define VEOF 4 #define VTIME 5 #define VMIN 6 #define VSWTC 7 #define VSTART 8 #define VSTOP 9 #define VSUSP 10 #define VEOL 11 #define VREPRINT 12 #define VDISCARD 13 #define VWERASE 14 #define VLNEXT 15 #define VEOL2 16 /* c_iflag bits */ #define IGNBRK 0000001 #define BRKINT 0000002 #define IGNPAR 0000004 #define PARMRK 0000010 #define INPCK 0000020 #define ISTRIP 0000040 #define INLCR 0000100 #define IGNCR 0000200 #define ICRNL 0000400 #define IUCLC 0001000 #define IXON 0002000 #define IXANY 0004000 #define IXOFF 0010000 #define IMAXBEL 0020000 #define IUTF8 0040000 /* c_oflag bits */ #define OPOST 0000001 #define OLCUC 0000002 #define ONLCR 0000004 #define OCRNL 0000010 #define ONOCR 0000020 #define ONLRET 0000040 #define OFILL 0000100 #define OFDEL 0000200 #define NLDLY 0000400 #define NL0 0000000 #define NL1 0000400 #define CRDLY 0003000 #define CR0 0000000 #define CR1 0001000 #define CR2 0002000 #define CR3 0003000 #define TABDLY 0014000 #define TAB0 0000000 #define TAB1 0004000 #define TAB2 0010000 #define TAB3 0014000 #define XTABS 0014000 #define BSDLY 0020000 #define BS0 0000000 #define BS1 0020000 #define VTDLY 0040000 #define VT0 0000000 #define VT1 0040000 #define FFDLY 0100000 #define FF0 0000000 #define FF1 0100000 /* c_cflag bit meaning */ #define CBAUD 0010017 #define B0 0000000 /* hang up */ #define B50 0000001 #define B75 0000002 #define B110 0000003 #define 
B134 0000004 #define B150 0000005 #define B200 0000006 #define B300 0000007 #define B600 0000010 #define B1200 0000011 #define B1800 0000012 #define B2400 0000013 #define B4800 0000014 #define B9600 0000015 #define B19200 0000016 #define B38400 0000017 #define EXTA B19200 #define EXTB B38400 #define CSIZE 0000060 #define CS5 0000000 #define CS6 0000020 #define CS7 0000040 #define CS8 0000060 #define CSTOPB 0000100 #define CREAD 0000200 #define PARENB 0000400 #define PARODD 0001000 #define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 #define B460800 0010004 #define B500000 0010005 #define B576000 0010006 #define B921600 0010007 #define B1000000 0010010 #define B1152000 0010011 #define B1500000 0010012 #define B2000000 0010013 #define B2500000 0010014 #define B3000000 0010015 #define B3500000 0010016 #define B4000000 0010017 #define CIBAUD 002003600000 /* input baud rate (not used) */ #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ /* c_lflag bits */ #define ISIG 0000001 #define ICANON 0000002 #define XCASE 0000004 #define ECHO 0000010 #define ECHOE 0000020 #define ECHOK 0000040 #define ECHONL 0000100 #define NOFLSH 0000200 #define TOSTOP 0000400 #define ECHOCTL 0001000 #define ECHOPRT 0002000 #define ECHOKE 0004000 #define FLUSHO 0010000 #define PENDIN 0040000 #define IEXTEN 0100000 /* tcflow() and TCXONC use these */ #define TCOOFF 0 #define TCOON 1 #define TCIOFF 2 #define TCION 3 /* tcflush() and TCFLSH use these */ #define TCIFLUSH 0 #define TCOFLUSH 1 #define TCIOFLUSH 2 /* tcsetattr uses these */ #define TCSANOW 0 #define TCSADRAIN 1 #define TCSAFLUSH 2 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/termios.h000066400000000000000000000061531314037446600244300ustar00rootroot00000000000000#ifndef _I386_TERMIOS_H #define _I386_TERMIOS_H #include #include struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; }; #define NCC 8 struct termio { unsigned short c_iflag; /* input mode flags */ unsigned short c_oflag; /* output mode flags */ unsigned short c_cflag; /* control mode flags */ unsigned short c_lflag; /* local mode flags */ unsigned char c_line; /* line discipline */ unsigned char c_cc[NCC]; /* control characters */ }; /* modem lines */ #define TIOCM_LE 0x001 #define TIOCM_DTR 0x002 #define TIOCM_RTS 0x004 #define TIOCM_ST 0x008 #define TIOCM_SR 0x010 #define TIOCM_CTS 0x020 #define TIOCM_CAR 0x040 #define TIOCM_RNG 0x080 #define TIOCM_DSR 0x100 #define TIOCM_CD TIOCM_CAR #define TIOCM_RI TIOCM_RNG #define TIOCM_OUT1 0x2000 #define TIOCM_OUT2 0x4000 #define TIOCM_LOOP 0x8000 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 #define N_MOUSE 2 #define N_PPP 3 #define N_STRIP 4 #define N_AX25 5 #define N_X25 6 /* X.25 async */ #define N_6PACK 7 #define N_MASC 8 /* Reserved for Mobitex module */ #define N_R3964 9 /* Reserved for Simatic R3964 module */ #define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ #define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ #define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ #define N_HDLC 13 /* synchronous HDLC */ #define N_SYNC_PPP 14 /* synchronous PPP */ #define N_HCI 15 /* Bluetooth HCI UART */ #ifdef __KERNEL__ #include /* intr=^C quit=^\ erase=del kill=^U eof=^D vtime=\0 vmin=\1 sxtc=\0 start=^Q 
stop=^S susp=^Z eol=\0 reprint=^R discard=^U werase=^W lnext=^V eol2=\0 */ #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" /* * Translate a "termio" structure into a "termios". Ugh. */ #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ unsigned short __tmp; \ get_user(__tmp,&(termio)->x); \ *(unsigned short *) &(termios)->x = __tmp; \ } #define user_termio_to_kernel_termios(termios, termio) \ ({ \ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ }) /* * Translate a "termios" structure into a "termio". Ugh. */ #define kernel_termios_to_user_termio(termio, termios) \ ({ \ put_user((termios)->c_iflag, &(termio)->c_iflag); \ put_user((termios)->c_oflag, &(termio)->c_oflag); \ put_user((termios)->c_cflag, &(termio)->c_cflag); \ put_user((termios)->c_lflag, &(termio)->c_lflag); \ put_user((termios)->c_line, &(termio)->c_line); \ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ }) #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) #endif /* __KERNEL__ */ #endif /* _I386_TERMIOS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/thread_info.h000066400000000000000000000124341314037446600252270ustar00rootroot00000000000000/* thread_info.h: i386 low-level thread information * * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds and Dave Miller */ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H #ifdef __KERNEL__ #include #include #ifndef __ASSEMBLY__ #include #endif /* * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages * - if the contents of this structure are changed, the assembly constants must also be changed */ #ifndef __ASSEMBLY__ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ void *sysenter_return; struct restart_block restart_block; unsigned long previous_esp; /* ESP of the previous stack in case of nested (IRQ) stacks */ __u8 supervisor_stack[0]; }; #else /* !__ASSEMBLY__ */ #include #endif #define PREEMPT_ACTIVE 0x10000000 #ifdef CONFIG_4KSTACKS #define THREAD_SIZE (4096) #else #define THREAD_SIZE (8192) #endif #define STACK_WARN (THREAD_SIZE/8) /* * macros/functions for gaining access to the thread information structure * * preempt_count needs to be 1 initially, until the scheduler is functional. 
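 *
 * Illustrative sketch (the values below are hypothetical, not from this
 * file): with THREAD_SIZE == 8192, an %esp of 0xc12345f0 masked with
 * ~(THREAD_SIZE - 1) yields 0xc1234000, the base of the stack pages where
 * the thread_info structure lives, which is exactly what
 * current_thread_info() computes:
 *
 *	struct thread_info *ti = current_thread_info();
 *	struct task_struct *tsk = ti->task;	/* owning task */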
*/ #ifndef __ASSEMBLY__ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) /* how to get the current stack pointer from C */ register unsigned long current_stack_pointer asm("esp") __attribute_used__; /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); } /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ ({ \ struct thread_info *ret; \ \ ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \ if (ret) \ memset(ret, 0, THREAD_SIZE); \ ret; \ }) #else #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) #endif #define free_thread_info(info) kfree(info) #else /* !__ASSEMBLY__ */ /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg) \ movl $-THREAD_SIZE, reg; \ andl %esp, reg /* use this one if reg already contains %esp */ #define GET_THREAD_INFO_WITH_ESP(reg) \ andl $-THREAD_SIZE, reg #endif /* * thread information flags * - these are process state flags that various assembly files may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_IRET 5 /* return with iret */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_MEMDIE 16 #define TIF_DEBUG 17 /* uses debug registers */ #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ #define _TIF_SYSCALL_TRACE (1<thread_info->status & TS_POLLING) #endif /* __KERNEL__ */ #endif /* _ASM_THREAD_INFO_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/timer.h000066400000000000000000000004511314037446600240610ustar00rootroot00000000000000#ifndef _ASMi386_TIMER_H #define _ASMi386_TIMER_H #include #include #define TICK_SIZE (tick_nsec / 1000) void setup_pit_timer(void); /* Modifiers for buggy PIT handling */ extern int pit_latch_buggy; extern int timer_ack; extern int recalibrate_cpu_khz(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/timex.h000066400000000000000000000007111314037446600240660ustar00rootroot00000000000000/* * linux/include/asm-i386/timex.h * * i386 architecture timex specifications */ #ifndef _ASMi386_TIMEX_H #define _ASMi386_TIMEX_H #include #include #ifdef CONFIG_X86_ELAN # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ #else # define CLOCK_TICK_RATE 1193182 /* Underlying HZ */ #endif extern int read_current_timer(unsigned long *timer_value); #define ARCH_HAS_READ_CURRENT_TIMER 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/tlb.h000066400000000000000000000006531314037446600235260ustar00rootroot00000000000000#ifndef _I386_TLB_H #define _I386_TLB_H /* * x86 doesn't need any special per-pte or * per-vma handling.. 
*/ #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) /* * .. because we flush the whole mm when it * fills up. */ #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/tlbflush.h000066400000000000000000000072141314037446600245700ustar00rootroot00000000000000#ifndef _I386_TLBFLUSH_H #define _I386_TLBFLUSH_H #include #include #define __flush_tlb() \ do { \ unsigned int tmpreg; \ \ __asm__ __volatile__( \ "movl %%cr3, %0; \n" \ "movl %0, %%cr3; # flush TLB \n" \ : "=r" (tmpreg) \ :: "memory"); \ } while (0) /* * Global pages have to be flushed a bit differently. Not a real * performance problem because this does not happen often. */ #define __flush_tlb_global() \ do { \ unsigned int tmpreg, cr4, cr4_orig; \ \ __asm__ __volatile__( \ "movl %%cr4, %2; # turn off PGE \n" \ "movl %2, %1; \n" \ "andl %3, %1; \n" \ "movl %1, %%cr4; \n" \ "movl %%cr3, %0; \n" \ "movl %0, %%cr3; # flush TLB \n" \ "movl %2, %%cr4; # turn PGE back on \n" \ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ : "i" (~X86_CR4_PGE) \ : "memory"); \ } while (0) extern unsigned long pgkern_mask; # define __flush_tlb_all() \ do { \ if (cpu_has_pge) \ __flush_tlb_global(); \ else \ __flush_tlb(); \ } while (0) #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #define __flush_tlb_single(addr) \ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) #ifdef CONFIG_X86_INVLPG # define __flush_tlb_one(addr) __flush_tlb_single(addr) #else # define __flush_tlb_one(addr) \ do { \ if (cpu_has_invlpg) \ __flush_tlb_single(addr); \ else \ __flush_tlb(); \ } while (0) #endif /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. 
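 *
 * A minimal usage sketch (the variables are hypothetical, not taken from
 * this header): after a single user PTE has been modified, only the page
 * that maps it needs to be invalidated:
 *
 *	struct vm_area_struct *vma;	/* region owning the page */
 *	unsigned long address;		/* user virtual address touched */
 *
 *	flush_tlb_page(vma, address);
 *
 * whereas flush_tlb_mm(vma->vm_mm) would drop every cached translation
 * for that address space.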
*/ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() extern void flush_tlb_all(void); extern void flush_tlb_current_task(void); extern void flush_tlb_mm(struct mm_struct *); extern void flush_tlb_page(struct vm_area_struct *, unsigned long); #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; }; DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* i386 does not keep any page table caches in TLB */ } #endif /* _I386_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/topology.h000066400000000000000000000065001314037446600246160ustar00rootroot00000000000000/* * linux/include/asm-i386/topology.h * * Written by: Matthew Dobson, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to */ #ifndef _ASM_I386_TOPOLOGY_H #define _ASM_I386_TOPOLOGY_H #ifdef CONFIG_X86_HT #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif #ifdef CONFIG_NUMA #include #include /* Mappings between logical cpu number and node number */ extern cpumask_t node_2_cpu_mask[]; extern int cpu_2_node[]; /* Returns the number of the node containing CPU 'cpu' */ static inline int cpu_to_node(int cpu) { return cpu_2_node[cpu]; } /* Returns the number of the node containing Node 'node'. This architecture is flat, so it is a pretty simple function! */ #define parent_node(node) (node) /* Returns a bitmask of CPUs on Node 'node'. */ static inline cpumask_t node_to_cpumask(int node) { return node_2_cpu_mask[node]; } /* Returns the number of the first CPU on Node 'node'. 
*/ static inline int node_to_first_cpu(int node) { cpumask_t mask = node_to_cpumask(node); return first_cpu(mask); } #define pcibus_to_node(bus) ((long) (bus)->sysdata) #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) /* sched_domains SD_NODE_INIT for NUMAQ machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ .parent = NULL, \ .groups = NULL, \ .min_interval = 8, \ .max_interval = 32, \ .busy_factor = 32, \ .imbalance_pct = 125, \ .cache_nice_tries = 1, \ .busy_idx = 3, \ .idle_idx = 1, \ .newidle_idx = 2, \ .wake_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_EXEC \ | SD_BALANCE_FORK \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ } extern unsigned long node_start_pfn[]; extern unsigned long node_end_pfn[]; extern unsigned long node_remap_size[]; #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) #else /* !CONFIG_NUMA */ /* * Other i386 platforms should define their own version of the * above macros here. */ #include #endif /* CONFIG_NUMA */ extern cpumask_t cpu_coregroup_map(int cpu); #ifdef CONFIG_SMP #define mc_capable() (boot_cpu_data.x86_max_cores > 1) #define smt_capable() (smp_num_siblings > 1) #endif #endif /* _ASM_I386_TOPOLOGY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/tsc.h000066400000000000000000000021461314037446600235350ustar00rootroot00000000000000/* * linux/include/asm-i386/tsc.h * * i386 TSC related functions */ #ifndef _ASM_i386_TSC_H #define _ASM_i386_TSC_H #include #include /* * Standard way to access the cycle counter on i586+ CPUs. * Currently only used on SMP. * * If you really have a SMP machine with i486 chips or older, * compile for that, and this will just always return zero. * That's ok, it just means that the nicer scheduling heuristics * won't work for you. * * We only use the low 32 bits, and we'd simply better make sure * that we reschedule before that wraps. Scheduling at least every * four billion cycles just basically sounds like a good idea, * regardless of how fast the machine is. */ typedef unsigned long long cycles_t; extern unsigned int cpu_khz; extern unsigned int tsc_khz; static inline cycles_t get_cycles(void) { unsigned long long ret = 0; #ifndef CONFIG_X86_TSC if (!cpu_has_tsc) return 0; #endif #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) rdtscll(ret); #endif return ret; } extern void tsc_init(void); extern void mark_tsc_unstable(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/types.h000066400000000000000000000024161314037446600241100ustar00rootroot00000000000000#ifndef _I386_TYPES_H #define _I386_TYPES_H #ifndef __ASSEMBLY__ typedef unsigned short umode_t; /* * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the * header files exported to user space */ typedef __signed__ char __s8; typedef unsigned char __u8; typedef __signed__ short __s16; typedef unsigned short __u16; typedef __signed__ int __s32; typedef unsigned int __u32; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) typedef __signed__ long long __s64; typedef unsigned long long __u64; #endif #endif /* __ASSEMBLY__ */ /* * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ #define BITS_PER_LONG 32 #ifndef __ASSEMBLY__ typedef signed char s8; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef signed long long s64; typedef unsigned long long u64; /* DMA addresses come in generic and 64-bit flavours. */ #ifdef CONFIG_HIGHMEM64G typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; #endif typedef u64 dma64_addr_t; #ifdef CONFIG_LBD typedef u64 sector_t; #define HAVE_SECTOR_T #endif #ifdef CONFIG_LSF typedef u64 blkcnt_t; #define HAVE_BLKCNT_T #endif #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/uaccess.h000066400000000000000000000433771314037446600244050ustar00rootroot00000000000000#ifndef __i386_UACCESS_H #define __i386_UACCESS_H /* * User space memory access functions */ #include #include #include #include #include #define VERIFY_READ 0 #define VERIFY_WRITE 1 /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with * get_fs() == KERNEL_DS, checking is bypassed. * * For historical reasons, these macros are grossly misnamed. */ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a,b) ((a).seg == (b).seg) /* * movsl can be slow when source and dest are not both 8-byte aligned */ #ifdef CONFIG_X86_INTEL_USERCOPY extern struct movsl_mask { int mask; } ____cacheline_aligned_in_smp movsl_mask; #endif #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: * (u33)addr + (u33)size >= (u33)current->addr_limit.seg * * This needs 33-bit arithmetic. We have a carry... */ #define __range_ok(addr,size) ({ \ unsigned long flag,sum; \ __chk_user_ptr(addr); \ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ :"=&r" (flag), "=r" (sum) \ :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ flag; }) /** * access_ok: - Checks if a user space pointer is valid * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe * to write to a block, it is always safe to read from it. * @addr: User space pointer to start of block to check * @size: Size of block to check * * Context: User context only. This function may sleep. * * Checks if a pointer to a block of memory in user space is valid. * * Returns true (nonzero) if the memory block may be valid, false (zero) * if it is definitely invalid. 
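 *
 * An illustrative sketch (buffer names and size are assumptions for this
 * example, not part of the original header): check the whole user range
 * once, then use the unchecked __copy_from_user() variant on it:
 *
 *	char kbuf[64];
 *	const char __user *ubuf;	/* hypothetical pointer from user space */
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;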
* * Note that, depending on architecture, this function probably just * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is * the address at which the program should continue. No registers are * modified, so it is entirely up to the continuation code to figure out * what to do. * * All the routines below use bits of fixup code that are out of line * with the main instruction path. This means when everything is well, * we don't even have to jump over them. Further, they do not intrude * on our cache or tlb entries. */ struct exception_table_entry { unsigned long insn, fixup; }; extern int fixup_exception(struct pt_regs *regs); /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * * This gets kind of ugly. We want to return _two_ values in "get_user()" * and yet we don't want to do any pointers, because that is too much * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. * * The "__xxx" versions of the user access functions are versions that * do not verify the address space, that must have been done previously * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). */ extern void __get_user_1(void); extern void __get_user_2(void); extern void __get_user_4(void); #define __get_user_x(size,ret,x,ptr) \ __asm__ __volatile__("call __get_user_" #size \ :"=a" (ret),"=d" (x) \ :"0" (ptr)) /* Careful: we have to cast the result to the type of the pointer for sign reasons */ /** * get_user: - Get a simple variable from user space. * @x: Variable to store result. * @ptr: Source address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and the result of * dereferencing @ptr must be assignable to @x without a cast. * * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ #define get_user(x,ptr) \ ({ int __ret_gu; \ unsigned long __val_gu; \ __chk_user_ptr(ptr); \ switch(sizeof (*(ptr))) { \ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ } \ (x) = (__typeof__(*(ptr)))__val_gu; \ __ret_gu; \ }) extern void __put_user_bad(void); /* * Strange magic calling convention: pointer in %ecx, * value in %eax(:%edx), return value in %eax, no clobbers. 
*/ extern void __put_user_1(void); extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); #define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) /** * put_user: - Write a simple value into user space. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and @x must be assignable * to the result of dereferencing @ptr. * * Returns zero on success, or -EFAULT on error. */ #ifdef CONFIG_X86_WP_WORKS_OK #define put_user(x,ptr) \ ({ int __ret_pu; \ __typeof__(*(ptr)) __pu_val; \ __chk_user_ptr(ptr); \ __pu_val = x; \ switch(sizeof(*(ptr))) { \ case 1: __put_user_1(__pu_val, ptr); break; \ case 2: __put_user_2(__pu_val, ptr); break; \ case 4: __put_user_4(__pu_val, ptr); break; \ case 8: __put_user_8(__pu_val, ptr); break; \ default:__put_user_X(__pu_val, ptr); break; \ } \ __ret_pu; \ }) #else #define put_user(x,ptr) \ ({ \ int __ret_pu; \ __typeof__(*(ptr)) __pus_tmp = x; \ __ret_pu=0; \ if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ sizeof(*(ptr))) != 0)) \ __ret_pu=-EFAULT; \ __ret_pu; \ }) #endif /** * __get_user: - Get a simple variable from user space, with less checking. * @x: Variable to store result. * @ptr: Source address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and the result of * dereferencing @ptr must be assignable to @x without a cast. * * Caller must check the pointer with access_ok() before calling this * function. * * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ #define __get_user(x,ptr) \ __get_user_nocheck((x),(ptr),sizeof(*(ptr))) /** * __put_user: - Write a simple value into user space, with less checking. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and @x must be assignable * to the result of dereferencing @ptr. * * Caller must check the pointer with access_ok() before calling this * function. * * Returns zero on success, or -EFAULT on error. 
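 *
 * Illustrative sketch (the pointer is an assumption for this note): write
 * one int back to user space after the range has already been validated
 * with access_ok():
 *
 *	int __user *argp;	/* hypothetical pointer supplied by user space */
 *
 *	if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
 *		return -EFAULT;
 *	if (__put_user(42, argp))
 *		return -EFAULT;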
*/ #define __put_user(x,ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __put_user_nocheck(x,ptr,size) \ ({ \ long __pu_err; \ __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ __pu_err; \ }) #define __put_user_u64(x, addr, err) \ __asm__ __volatile__( \ "1: movl %%eax,0(%2)\n" \ "2: movl %%edx,4(%2)\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,4b\n" \ " .long 2b,4b\n" \ ".previous" \ : "=r"(err) \ : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) #ifdef CONFIG_X86_WP_WORKS_OK #define __put_user_size(x,ptr,size,retval,errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ default: __put_user_bad(); \ } \ } while (0) #else #define __put_user_size(x,ptr,size,retval,errret) \ do { \ __typeof__(*(ptr)) __pus_tmp = x; \ retval = 0; \ \ if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ retval = errret; \ } while (0) #endif struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) /* * Tell gcc we read from memory instead of writing: this is because * we do not write to any memory gcc knows about, so there are no * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ "1: mov"itype" %"rtype"1,%2\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,3b\n" \ ".previous" \ : "=r"(err) \ : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) #define __get_user_nocheck(x,ptr,size) \ ({ \ long __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); #define __get_user_size(x,ptr,size,retval,errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ default: (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ "1: mov"itype" %2,%"rtype"1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,3b\n" \ ".previous" \ : "=r"(err), ltype (x) \ : "m"(__m(addr)), "i"(errret), "0"(err)) unsigned long __must_check __copy_to_user_ll(void __user *to, const void *from, unsigned long n); unsigned long __must_check __copy_from_user_ll(void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nozero(void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nocache(void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n); /* * Here we special-case 1, 2 and 4-byte copy_*_user invocations. 
On a fault * we return the initial request size (1, 2 or 4), as copy_*_user should do. * If a store crosses a page boundary and gets a fault, the x86 will not write * anything, so this is accurate. */ /** * __copy_to_user: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. * @from: Source address, in kernel space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be copied. * On success, this will be zero. */ static __always_inline unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); return ret; case 2: __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); return ret; case 4: __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); return ret; } } return __copy_to_user_ll(to, from, n); } static __always_inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { might_sleep(); return __copy_to_user_inatomic(to, from, n); } /** * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. * @from: Source address, in user space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from user space to kernel space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be copied. * On success, this will be zero. * * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. * * An alternate version - __copy_from_user_inatomic() - may be called from * atomic context and will fail rather than sleep. In this case the * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h * for explanation of why this is needed. */ static __always_inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { /* Avoid zeroing the tail if the copy fails.. * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, * but as the zeroing behaviour is only significant when n is not * constant, that shouldn't be a problem. 
*/ if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } return __copy_from_user_ll_nozero(to, from, n); } static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_sleep(); if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } return __copy_from_user_ll(to, from, n); } #define ARCH_HAS_NOCACHE_UACCESS static __always_inline unsigned long __copy_from_user_nocache(void *to, const void __user *from, unsigned long n) { might_sleep(); if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } return __copy_from_user_ll_nocache(to, from, n); } static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { return __copy_from_user_ll_nocache_nozero(to, from, n); } unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n); unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n); long __must_check strncpy_from_user(char *dst, const char __user *src, long count); long __must_check __strncpy_from_user(char *dst, const char __user *src, long count); /** * strlen_user: - Get the size of a string in user space. * @str: The string to measure. * * Context: User context only. This function may sleep. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. * * If there is a limit on the length of a valid string, you may wish to * consider using strnlen_user() instead. */ #define strlen_user(str) strnlen_user(str, ~0UL >> 1) long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); unsigned long __must_check __clear_user(void __user *mem, unsigned long len); #endif /* __i386_UACCESS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/ucontext.h000066400000000000000000000004301314037446600246070ustar00rootroot00000000000000#ifndef _ASMi386_UCONTEXT_H #define _ASMi386_UCONTEXT_H struct ucontext { unsigned long uc_flags; struct ucontext *uc_link; stack_t uc_stack; struct sigcontext uc_mcontext; sigset_t uc_sigmask; /* mask last for extensibility */ }; #endif /* !_ASMi386_UCONTEXT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/unaligned.h000066400000000000000000000022471314037446600247140ustar00rootroot00000000000000#ifndef __I386_UNALIGNED_H #define __I386_UNALIGNED_H /* * The i386 can do unaligned accesses itself. * * The strange macros are there to make sure these can't * be misused in a way that makes them not work on other * architectures where unaligned accesses aren't as simple. 
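 *
 * A hedged usage sketch (the packet buffer and offset are hypothetical):
 * fetching and storing a 16-bit field at an arbitrarily aligned offset of
 * a byte buffer:
 *
 *	unsigned char *pkt;		/* some received byte stream */
 *	u16 len = get_unaligned((u16 *)(pkt + 3));
 *	put_unaligned(len, (u16 *)(pkt + 3));
 *
 * On i386 both macros degenerate to plain dereferences, but the same code
 * stays correct on architectures with alignment restrictions.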
*/ /** * get_unaligned - get value from possibly mis-aligned location * @ptr: pointer to value * * This macro should be used for accessing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. retrieving a u16 value from a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define get_unaligned(ptr) (*(ptr)) /** * put_unaligned - put value to a possibly mis-aligned location * @val: value to place * @ptr: pointer to location * * This macro should be used for placing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. writing a u16 value to a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/unistd.h000066400000000000000000000347771314037446600242710ustar00rootroot00000000000000#ifndef _ASM_I386_UNISTD_H_ #define _ASM_I386_UNISTD_H_ /* * This file contains the system call numbers. */ #define __NR_restart_syscall 0 #define __NR_exit 1 #define __NR_fork 2 #define __NR_read 3 #define __NR_write 4 #define __NR_open 5 #define __NR_close 6 #define __NR_waitpid 7 #define __NR_creat 8 #define __NR_link 9 #define __NR_unlink 10 #define __NR_execve 11 #define __NR_chdir 12 #define __NR_time 13 #define __NR_mknod 14 #define __NR_chmod 15 #define __NR_lchown 16 #define __NR_break 17 #define __NR_oldstat 18 #define __NR_lseek 19 #define __NR_getpid 20 #define __NR_mount 21 #define __NR_umount 22 #define __NR_setuid 23 #define __NR_getuid 24 #define __NR_stime 25 #define __NR_ptrace 26 #define __NR_alarm 27 #define __NR_oldfstat 28 #define __NR_pause 29 #define __NR_utime 30 #define __NR_stty 31 #define __NR_gtty 32 #define __NR_access 33 #define __NR_nice 34 #define __NR_ftime 35 #define __NR_sync 36 #define __NR_kill 37 #define __NR_rename 38 #define __NR_mkdir 39 #define __NR_rmdir 40 #define __NR_dup 41 #define __NR_pipe 42 #define __NR_times 43 #define __NR_prof 44 #define __NR_brk 45 #define __NR_setgid 46 #define __NR_getgid 47 #define __NR_signal 48 #define __NR_geteuid 49 #define __NR_getegid 50 #define __NR_acct 51 #define __NR_umount2 52 #define __NR_lock 53 #define __NR_ioctl 54 #define __NR_fcntl 55 #define __NR_mpx 56 #define __NR_setpgid 57 #define __NR_ulimit 58 #define __NR_oldolduname 59 #define __NR_umask 60 #define __NR_chroot 61 #define __NR_ustat 62 #define __NR_dup2 63 #define __NR_getppid 64 #define __NR_getpgrp 65 #define __NR_setsid 66 #define __NR_sigaction 67 #define __NR_sgetmask 68 #define __NR_ssetmask 69 #define __NR_setreuid 70 #define __NR_setregid 71 #define __NR_sigsuspend 72 #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 #define __NR_getgroups 80 #define __NR_setgroups 81 #define __NR_select 82 #define __NR_symlink 83 #define __NR_oldlstat 84 #define __NR_readlink 85 #define __NR_uselib 86 #define __NR_swapon 87 #define __NR_reboot 88 #define __NR_readdir 89 #define __NR_mmap 90 #define __NR_munmap 91 #define __NR_truncate 92 #define __NR_ftruncate 93 #define __NR_fchmod 94 #define __NR_fchown 95 #define __NR_getpriority 96 #define __NR_setpriority 97 #define __NR_profil 98 #define __NR_statfs 99 #define __NR_fstatfs 100 #define __NR_ioperm 101 
#define __NR_socketcall 102 #define __NR_syslog 103 #define __NR_setitimer 104 #define __NR_getitimer 105 #define __NR_stat 106 #define __NR_lstat 107 #define __NR_fstat 108 #define __NR_olduname 109 #define __NR_iopl 110 #define __NR_vhangup 111 #define __NR_idle 112 #define __NR_vm86old 113 #define __NR_wait4 114 #define __NR_swapoff 115 #define __NR_sysinfo 116 #define __NR_ipc 117 #define __NR_fsync 118 #define __NR_sigreturn 119 #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 #define __NR_modify_ldt 123 #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 #define __NR_create_module 127 #define __NR_init_module 128 #define __NR_delete_module 129 #define __NR_get_kernel_syms 130 #define __NR_quotactl 131 #define __NR_getpgid 132 #define __NR_fchdir 133 #define __NR_bdflush 134 #define __NR_sysfs 135 #define __NR_personality 136 #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ #define __NR_setfsuid 138 #define __NR_setfsgid 139 #define __NR__llseek 140 #define __NR_getdents 141 #define __NR__newselect 142 #define __NR_flock 143 #define __NR_msync 144 #define __NR_readv 145 #define __NR_writev 146 #define __NR_getsid 147 #define __NR_fdatasync 148 #define __NR__sysctl 149 #define __NR_mlock 150 #define __NR_munlock 151 #define __NR_mlockall 152 #define __NR_munlockall 153 #define __NR_sched_setparam 154 #define __NR_sched_getparam 155 #define __NR_sched_setscheduler 156 #define __NR_sched_getscheduler 157 #define __NR_sched_yield 158 #define __NR_sched_get_priority_max 159 #define __NR_sched_get_priority_min 160 #define __NR_sched_rr_get_interval 161 #define __NR_nanosleep 162 #define __NR_mremap 163 #define __NR_setresuid 164 #define __NR_getresuid 165 #define __NR_vm86 166 #define __NR_query_module 167 #define __NR_poll 168 #define __NR_nfsservctl 169 #define __NR_setresgid 170 #define __NR_getresgid 171 #define __NR_prctl 172 #define __NR_rt_sigreturn 173 #define __NR_rt_sigaction 174 #define __NR_rt_sigprocmask 175 #define __NR_rt_sigpending 176 #define __NR_rt_sigtimedwait 177 #define __NR_rt_sigqueueinfo 178 #define __NR_rt_sigsuspend 179 #define __NR_pread64 180 #define __NR_pwrite64 181 #define __NR_chown 182 #define __NR_getcwd 183 #define __NR_capget 184 #define __NR_capset 185 #define __NR_sigaltstack 186 #define __NR_sendfile 187 #define __NR_getpmsg 188 /* some people actually want streams */ #define __NR_putpmsg 189 /* some people actually want streams */ #define __NR_vfork 190 #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ #define __NR_mmap2 192 #define __NR_truncate64 193 #define __NR_ftruncate64 194 #define __NR_stat64 195 #define __NR_lstat64 196 #define __NR_fstat64 197 #define __NR_lchown32 198 #define __NR_getuid32 199 #define __NR_getgid32 200 #define __NR_geteuid32 201 #define __NR_getegid32 202 #define __NR_setreuid32 203 #define __NR_setregid32 204 #define __NR_getgroups32 205 #define __NR_setgroups32 206 #define __NR_fchown32 207 #define __NR_setresuid32 208 #define __NR_getresuid32 209 #define __NR_setresgid32 210 #define __NR_getresgid32 211 #define __NR_chown32 212 #define __NR_setuid32 213 #define __NR_setgid32 214 #define __NR_setfsuid32 215 #define __NR_setfsgid32 216 #define __NR_pivot_root 217 #define __NR_mincore 218 #define __NR_madvise 219 #define __NR_madvise1 219 /* delete when C lib stub is removed */ #define __NR_getdents64 220 #define __NR_fcntl64 221 /* 223 is unused */ #define __NR_gettid 224 #define __NR_readahead 225 #define __NR_setxattr 226 #define 
__NR_lsetxattr 227 #define __NR_fsetxattr 228 #define __NR_getxattr 229 #define __NR_lgetxattr 230 #define __NR_fgetxattr 231 #define __NR_listxattr 232 #define __NR_llistxattr 233 #define __NR_flistxattr 234 #define __NR_removexattr 235 #define __NR_lremovexattr 236 #define __NR_fremovexattr 237 #define __NR_tkill 238 #define __NR_sendfile64 239 #define __NR_futex 240 #define __NR_sched_setaffinity 241 #define __NR_sched_getaffinity 242 #define __NR_set_thread_area 243 #define __NR_get_thread_area 244 #define __NR_io_setup 245 #define __NR_io_destroy 246 #define __NR_io_getevents 247 #define __NR_io_submit 248 #define __NR_io_cancel 249 #define __NR_fadvise64 250 /* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ #define __NR_exit_group 252 #define __NR_lookup_dcookie 253 #define __NR_epoll_create 254 #define __NR_epoll_ctl 255 #define __NR_epoll_wait 256 #define __NR_remap_file_pages 257 #define __NR_set_tid_address 258 #define __NR_timer_create 259 #define __NR_timer_settime (__NR_timer_create+1) #define __NR_timer_gettime (__NR_timer_create+2) #define __NR_timer_getoverrun (__NR_timer_create+3) #define __NR_timer_delete (__NR_timer_create+4) #define __NR_clock_settime (__NR_timer_create+5) #define __NR_clock_gettime (__NR_timer_create+6) #define __NR_clock_getres (__NR_timer_create+7) #define __NR_clock_nanosleep (__NR_timer_create+8) #define __NR_statfs64 268 #define __NR_fstatfs64 269 #define __NR_tgkill 270 #define __NR_utimes 271 #define __NR_fadvise64_64 272 #define __NR_vserver 273 #define __NR_mbind 274 #define __NR_get_mempolicy 275 #define __NR_set_mempolicy 276 #define __NR_mq_open 277 #define __NR_mq_unlink (__NR_mq_open+1) #define __NR_mq_timedsend (__NR_mq_open+2) #define __NR_mq_timedreceive (__NR_mq_open+3) #define __NR_mq_notify (__NR_mq_open+4) #define __NR_mq_getsetattr (__NR_mq_open+5) #define __NR_kexec_load 283 #define __NR_waitid 284 /* #define __NR_sys_setaltroot 285 */ #define __NR_add_key 286 #define __NR_request_key 287 #define __NR_keyctl 288 #define __NR_ioprio_set 289 #define __NR_ioprio_get 290 #define __NR_inotify_init 291 #define __NR_inotify_add_watch 292 #define __NR_inotify_rm_watch 293 #define __NR_migrate_pages 294 #define __NR_openat 295 #define __NR_mkdirat 296 #define __NR_mknodat 297 #define __NR_fchownat 298 #define __NR_futimesat 299 #define __NR_fstatat64 300 #define __NR_unlinkat 301 #define __NR_renameat 302 #define __NR_linkat 303 #define __NR_symlinkat 304 #define __NR_readlinkat 305 #define __NR_fchmodat 306 #define __NR_faccessat 307 #define __NR_pselect6 308 #define __NR_ppoll 309 #define __NR_unshare 310 #define __NR_set_robust_list 311 #define __NR_get_robust_list 312 #define __NR_splice 313 #define __NR_sync_file_range 314 #define __NR_tee 315 #define __NR_vmsplice 316 #define __NR_move_pages 317 #ifdef __KERNEL__ #define NR_syscalls 318 /* * user-visible error numbers are in the range -1 - -128: see * */ #define __syscall_return(type, res) \ do { \ if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \ errno = -(res); \ res = -1; \ } \ return (type) (res); \ } while (0) /* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. 
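 *
 * Illustrative sketch (an assumption for this note, not part of the
 * original header): the _syscallN macros expand to a complete wrapper
 * function, so
 *
 *	static inline _syscall0(long, getpid)
 *
 * defines "static inline long getpid(void)" that loads __NR_getpid into
 * %eax, executes "int $0x80" and maps negative kernel return values onto
 * errno/-1 via __syscall_return(); an 'errno' variable must be visible at
 * the point of expansion.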
*/ #define _syscall0(type,name) \ type name(void) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name)); \ __syscall_return(type,__res); \ } #define _syscall1(type,name,type1,arg1) \ type name(type1 arg1) \ { \ long __res; \ __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall2(type,name,type1,arg1,type2,arg2) \ type name(type1 arg1,type2 arg2) \ { \ long __res; \ __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \ : "memory"); \ __syscall_return(type,__res); \ } #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ type name(type1 arg1,type2 arg2,type3 arg3) \ { \ long __res; \ __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ { \ long __res; \ __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ : "=a" (__res) \ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ { \ long __res; \ __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \ "int $0x80 ; pop %%ebx" \ : "=a" (__res) \ : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ : "memory"); \ __syscall_return(type,__res); \ } #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ { \ long __res; \ struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \ __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \ "pop %%ebx ; pop %%ebp" \ : "=a" (__res) \ : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ : "memory"); \ __syscall_return(type,__res); \ } #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND #ifdef __KERNEL_SYSCALLS__ #include #include #include #include /* * we need this inline - forking from kernel space will result * in NO COPY ON WRITE (!!!), until an execve is executed. This * is no problem, but for the stack. 
This is handled by not letting * main() use the stack at all after fork(). Thus, no function * calls - which means inline code for fork too, as otherwise we * would use the stack upon exit from 'fork()'. * * Actually only pause and fork are needed inline, so that there * won't be any messing with the stack from main(), but we define * some others too. */ static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount); asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); asmlinkage int sys_execve(struct pt_regs regs); asmlinkage int sys_clone(struct pt_regs regs); asmlinkage int sys_fork(struct pt_regs regs); asmlinkage int sys_vfork(struct pt_regs regs); asmlinkage int sys_pipe(unsigned long __user *fildes); asmlinkage long sys_iopl(unsigned long unused); struct sigaction; asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, struct sigaction __user *oact, size_t sigsetsize); #endif /* __KERNEL_SYSCALLS__ */ /* * "Conditional" syscalls * * What we want is __attribute__((weak,alias("sys_ni_syscall"))), * but it doesn't work on all toolchains, so we just do it by hand */ #ifndef cond_syscall #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") #endif #endif /* __KERNEL__ */ #endif /* _ASM_I386_UNISTD_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/unwind.h000066400000000000000000000053301314037446600242460ustar00rootroot00000000000000#ifndef _ASM_I386_UNWIND_H #define _ASM_I386_UNWIND_H /* * Copyright (C) 2002-2006 Novell, Inc. * Jan Beulich * This code is released under version 2 of the GNU GPL. */ #ifdef CONFIG_STACK_UNWIND #include #include #include #include struct unwind_frame_info { struct pt_regs regs; struct task_struct *task; }; #define UNW_PC(frame) (frame)->regs.eip #define UNW_SP(frame) (frame)->regs.esp #ifdef CONFIG_FRAME_POINTER #define UNW_FP(frame) (frame)->regs.ebp #define FRAME_RETADDR_OFFSET 4 #define FRAME_LINK_OFFSET 0 #define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0) #define STACK_TOP(tsk) ((tsk)->thread.esp0) #endif #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) #define UNW_REGISTER_INFO \ PTREGS_INFO(eax), \ PTREGS_INFO(ecx), \ PTREGS_INFO(edx), \ PTREGS_INFO(ebx), \ PTREGS_INFO(esp), \ PTREGS_INFO(ebp), \ PTREGS_INFO(esi), \ PTREGS_INFO(edi), \ PTREGS_INFO(eip) static inline void arch_unw_init_frame_info(struct unwind_frame_info *info, /*const*/ struct pt_regs *regs) { if (user_mode_vm(regs)) info->regs = *regs; else { memcpy(&info->regs, regs, offsetof(struct pt_regs, esp)); info->regs.esp = (unsigned long)®s->esp; info->regs.xss = __KERNEL_DS; } } static inline void arch_unw_init_blocked(struct unwind_frame_info *info) { memset(&info->regs, 0, sizeof(info->regs)); info->regs.eip = info->task->thread.eip; info->regs.xcs = __KERNEL_CS; __get_user(info->regs.ebp, (long *)info->task->thread.esp); info->regs.esp = info->task->thread.esp; info->regs.xss = __KERNEL_DS; info->regs.xds = __USER_DS; info->regs.xes = __USER_DS; } extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, asmlinkage int (*callback)(struct unwind_frame_info *, void *arg), void *arg); static inline int arch_unw_user_mode(const struct unwind_frame_info *info) { #if 0 /* This can only work when selector register and EFLAGS saves/restores are properly annotated (and tracked in UNW_REGISTER_INFO). 
*/ return user_mode_vm(&info->regs); #else return info->regs.eip < PAGE_OFFSET || (info->regs.eip >= __fix_to_virt(FIX_VDSO) && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) || info->regs.esp < PAGE_OFFSET; #endif } #else #define UNW_PC(frame) ((void)(frame), 0) #define UNW_SP(frame) ((void)(frame), 0) static inline int arch_unw_user_mode(const void *info) { return 0; } #endif #endif /* _ASM_I386_UNWIND_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/user.h000066400000000000000000000114371314037446600237250ustar00rootroot00000000000000#ifndef _I386_USER_H #define _I386_USER_H #include /* Core file format: The core file is written in such a way that gdb can understand it and provide useful information to the user (under linux we use the 'trad-core' bfd). There are quite a number of obstacles to being able to view the contents of the floating point registers, and until these are solved you will not be able to view the contents of them. Actually, you can read in the core file and look at the contents of the user struct to find out what the floating point registers contain. The actual file contents are as follows: UPAGE: 1 page consisting of a user struct that tells gdb what is present in the file. Directly after this is a copy of the task_struct, which is currently not used by gdb, but it may come in useful at some point. All of the registers are stored as part of the upage. The upage should always be only one page. DATA: The data area is stored. We use current->end_text to current->brk to pick up all of the user variables, plus any memory that may have been malloced. No attempt is made to determine if a page is demand-zero or if a page is totally unused, we just cover the entire range. All of the addresses are rounded in such a way that an integral number of pages is written. STACK: We need the stack information in order to get a meaningful backtrace. We need to write the data from (esp) to current->start_stack, so we round each of these off in order to be able to write an integer number of pages. The minimum core file size is 3 pages, or 12288 bytes. */ /* * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 * * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for * interacting with the FXSR-format floating point environment. Floating * point data can be accessed in the regular format in the usual manner, * and both the standard and SIMD floating point data can be accessed via * the new ptrace requests. In either case, changes to the FPU environment * will be reflected in the task's state as expected. 
*/ struct user_i387_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ }; struct user_fxsr_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long reserved; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; }; /* * This is the old layout of "struct pt_regs", and * is still the layout used by user mode (the new * pt_regs doesn't have all registers as the kernel * doesn't use the extra segment registers) */ struct user_regs_struct { long ebx, ecx, edx, esi, edi, ebp, eax; unsigned short ds, __ds, es, __es; unsigned short fs, __fs, gs, __gs; long orig_eax, eip; unsigned short cs, __cs; long eflags, esp; unsigned short ss, __ss; }; /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments are within the file, and what virtual addresses to use. */ struct user{ /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ /* for this mess. Not yet used. */ struct user_i387_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ unsigned long int u_tsize; /* Text segment size (pages). */ unsigned long int u_dsize; /* Data segment size (pages). */ unsigned long int u_ssize; /* Stack segment size (pages). */ unsigned long start_code; /* Starting virtual address of text. */ unsigned long start_stack; /* Starting virtual address of stack area. This is actually the bottom of the stack, the top of the stack is always found in the esp register. */ long int signal; /* Signal that caused the core dump. */ int reserved; /* No longer used */ struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; }; #define NBPG PAGE_SIZE #define UPAGES 1 #define HOST_TEXT_START_ADDR (u.start_code) #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _I386_USER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/vga.h000066400000000000000000000005731314037446600235230ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/vic.h000066400000000000000000000036211314037446600235240ustar00rootroot00000000000000/* Copyright (C) 1999,2001 * * Author: J.E.J.Bottomley@HansenPartnership.com * * Standard include definitions for the NCR Voyager Interrupt Controller */ /* The eight CPI vectors. To activate a CPI, you write a bit mask * corresponding to the processor set to be interrupted into the * relevant register. 
That set of CPUs will then be interrupted with * the CPI */ static const int VIC_CPI_Registers[] = {0xFC00, 0xFC01, 0xFC08, 0xFC09, 0xFC10, 0xFC11, 0xFC18, 0xFC19 }; #define VIC_PROC_WHO_AM_I 0xfc29 # define QUAD_IDENTIFIER 0xC0 # define EIGHT_SLOT_IDENTIFIER 0xE0 #define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72 #define VIC_CPI_BASE_REGISTER 0xFC41 #define VIC_PROCESSOR_ID 0xFC21 # define VIC_CPU_MASQUERADE_ENABLE 0x8 #define VIC_CLAIM_REGISTER_0 0xFC38 #define VIC_CLAIM_REGISTER_1 0xFC39 #define VIC_REDIRECT_REGISTER_0 0xFC60 #define VIC_REDIRECT_REGISTER_1 0xFC61 #define VIC_PRIORITY_REGISTER 0xFC20 #define VIC_PRIMARY_MC_BASE 0xFC48 #define VIC_SECONDARY_MC_BASE 0xFC49 #define QIC_PROCESSOR_ID 0xFC71 # define QIC_CPUID_ENABLE 0x08 #define QIC_VIC_CPI_BASE_REGISTER 0xFC79 #define QIC_CPI_BASE_REGISTER 0xFC7A #define QIC_MASK_REGISTER0 0xFC80 /* NOTE: these are masked high, enabled low */ # define QIC_PERF_TIMER 0x01 # define QIC_LPE 0x02 # define QIC_SYS_INT 0x04 # define QIC_CMN_INT 0x08 /* at the moment, just enable CMN_INT, disable SYS_INT */ # define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */)) #define QIC_MASK_REGISTER1 0xFC81 # define QIC_BOOT_CPI_MASK 0xFE /* Enable CPI's 1-6 inclusive */ # define QIC_CPI_ENABLE 0x81 #define QIC_INTERRUPT_CLEAR0 0xFC8A #define QIC_INTERRUPT_CLEAR1 0xFC8B /* this is where we place the CPI vectors */ #define VIC_DEFAULT_CPI_BASE 0xC0 /* this is where we place the QIC CPI vectors */ #define QIC_DEFAULT_CPI_BASE 0xD0 #define VIC_BOOT_INTERRUPT_MASK 0xfe extern void smp_vic_timer_interrupt(struct pt_regs *regs); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/vm86.h000066400000000000000000000134421314037446600235450ustar00rootroot00000000000000#ifndef _LINUX_VM86_H #define _LINUX_VM86_H /* * I'm guessing at the VIF/VIP flag usage, but hope that this is how * the Pentium uses them. Linux will return from vm86 mode when both * VIF and VIP is set. * * On a Pentium, we could probably optimize the virtual flags directly * in the eflags register instead of doing it "by hand" in vflags... * * Linus */ #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #ifdef CONFIG_VM86 #define VM_MASK 0x00020000 #else #define VM_MASK 0 /* ignored */ #endif #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ #define VIP_MASK 0x00100000 /* virtual interrupt pending */ #define ID_MASK 0x00200000 #define BIOSSEG 0x0f000 #define CPU_086 0 #define CPU_186 1 #define CPU_286 2 #define CPU_386 3 #define CPU_486 4 #define CPU_586 5 /* * Return values for the 'vm86()' system call */ #define VM86_TYPE(retval) ((retval) & 0xff) #define VM86_ARG(retval) ((retval) >> 8) #define VM86_SIGNAL 0 /* return due to signal */ #define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ #define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ /* * Additional return values when invoking new vm86() */ #define VM86_PICRETURN 4 /* return due to pending PIC request */ #define VM86_TRAP 6 /* return due to DOS-debugger request */ /* * function codes when invoking new vm86() */ #define VM86_PLUS_INSTALL_CHECK 0 #define VM86_ENTER 1 #define VM86_ENTER_NO_BYPASS 2 #define VM86_REQUEST_IRQ 3 #define VM86_FREE_IRQ 4 #define VM86_GET_IRQ_BITS 5 #define VM86_GET_AND_RESET_IRQ 6 /* * This is the stack-layout seen by the user space program when we have * done a translation of "SAVE_ALL" from vm86 mode. 
The real kernel layout * is 'kernel_vm86_regs' (see below). */ struct vm86_regs { /* * normal regs, with special meaning for the segment descriptors.. */ long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; long __null_ds; long __null_es; long __null_fs; long __null_gs; long orig_eax; long eip; unsigned short cs, __csh; long eflags; long esp; unsigned short ss, __ssh; /* * these are specific to v86 mode: */ unsigned short es, __esh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; }; struct revectored_struct { unsigned long __map[8]; /* 256 bits */ }; struct vm86_struct { struct vm86_regs regs; unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; }; /* * flags masks */ #define VM86_SCREEN_BITMAP 0x0001 struct vm86plus_info_struct { unsigned long force_return_for_pic:1; unsigned long vm86dbg_active:1; /* for debugger */ unsigned long vm86dbg_TFpendig:1; /* for debugger */ unsigned long unused:28; unsigned long is_vm86pus:1; /* for vm86 internal use */ unsigned char vm86dbg_intxxtab[32]; /* for debugger */ }; struct vm86plus_struct { struct vm86_regs regs; unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; }; #ifdef __KERNEL__ /* * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 * mode - the main change is that the old segment descriptors aren't * useful any more and are forced to be zero by the kernel (and the * hardware when a trap occurs), and the real segment descriptors are * at the end of the structure. Look at ptrace.h to see the "normal" * setup. For user space layout see 'struct vm86_regs' above. */ struct kernel_vm86_regs { /* * normal regs, with special meaning for the segment descriptors.. */ long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; long __null_ds; long __null_es; long orig_eax; long eip; unsigned short cs, __csh; long eflags; long esp; unsigned short ss, __ssh; /* * these are specific to v86 mode: */ unsigned short es, __esh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; }; struct kernel_vm86_struct { struct kernel_vm86_regs regs; /* * the below part remains on the kernel stack while we are in VM86 mode. * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above * 'struct kernel_vm86_regs' with the then actual values. * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' * in kernelspace, hence we need not reget the data from userspace. */ #define VM86_TSS_ESP0 flags unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; struct pt_regs *regs32; /* here we save the pointer to the old regs */ /* * The below is not part of the structure, but the stack layout continues * this way. In front of 'return-eip' may be some data, depending on * compilation, so we don't rely on this and save the pointer to 'oldregs' * in 'regs32' above. 
* However, with GCC-2.7.2 and the current CFLAGS you see exactly this: long return-eip; from call to vm86() struct pt_regs oldregs; user space registers as saved by syscall */ }; #ifdef CONFIG_VM86 void handle_vm86_fault(struct kernel_vm86_regs *, long); int handle_vm86_trap(struct kernel_vm86_regs *, long, int); struct task_struct; void release_vm86_irqs(struct task_struct *); #else #define handle_vm86_fault(a, b) #define release_vm86_irqs(a) static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { return 0; } #endif /* CONFIG_VM86 */ #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/voyager.h000066400000000000000000000414561314037446600244270ustar00rootroot00000000000000/* Copyright (C) 1999,2001 * * Author: J.E.J.Bottomley@HansenPartnership.com * * Standard include definitions for the NCR Voyager system */ #undef VOYAGER_DEBUG #undef VOYAGER_CAT_DEBUG #ifdef VOYAGER_DEBUG #define VDEBUG(x) printk x #else #define VDEBUG(x) #endif /* There are three levels of voyager machine: 3,4 and 5. The rule is * if it's less than 3435 it's a Level 3 except for a 3360 which is * a level 4. A 3435 or above is a Level 5 */ #define VOYAGER_LEVEL5_AND_ABOVE 0x3435 #define VOYAGER_LEVEL4 0x3360 /* The L4 DINO ASIC */ #define VOYAGER_DINO 0x43 /* voyager ports in standard I/O space */ #define VOYAGER_MC_SETUP 0x96 #define VOYAGER_CAT_CONFIG_PORT 0x97 # define VOYAGER_CAT_DESELECT 0xff #define VOYAGER_SSPB_RELOCATION_PORT 0x98 /* Valid CAT controller commands */ /* start instruction register cycle */ #define VOYAGER_CAT_IRCYC 0x01 /* start data register cycle */ #define VOYAGER_CAT_DRCYC 0x02 /* move to execute state */ #define VOYAGER_CAT_RUN 0x0F /* end operation */ #define VOYAGER_CAT_END 0x80 /* hold in idle state */ #define VOYAGER_CAT_HOLD 0x90 /* single step an "intest" vector */ #define VOYAGER_CAT_STEP 0xE0 /* return cat controller to CLEMSON mode */ #define VOYAGER_CAT_CLEMSON 0xFF /* the default cat command header */ #define VOYAGER_CAT_HEADER 0x7F /* the range of possible CAT module ids in the system */ #define VOYAGER_MIN_MODULE 0x10 #define VOYAGER_MAX_MODULE 0x1f /* The voyager registers per asic */ #define VOYAGER_ASIC_ID_REG 0x00 #define VOYAGER_ASIC_TYPE_REG 0x01 /* the sub address registers can be made auto incrementing on reads */ #define VOYAGER_AUTO_INC_REG 0x02 # define VOYAGER_AUTO_INC 0x04 # define VOYAGER_NO_AUTO_INC 0xfb #define VOYAGER_SUBADDRDATA 0x03 #define VOYAGER_SCANPATH 0x05 # define VOYAGER_CONNECT_ASIC 0x01 # define VOYAGER_DISCONNECT_ASIC 0xfe #define VOYAGER_SUBADDRLO 0x06 #define VOYAGER_SUBADDRHI 0x07 #define VOYAGER_SUBMODSELECT 0x08 #define VOYAGER_SUBMODPRESENT 0x09 #define VOYAGER_SUBADDR_LO 0xff #define VOYAGER_SUBADDR_HI 0xffff /* the maximum size of a scan path -- used to form instructions */ #define VOYAGER_MAX_SCAN_PATH 0x100 /* the biggest possible register size (in bytes) */ #define VOYAGER_MAX_REG_SIZE 4 /* Total number of possible modules (including submodules) */ #define VOYAGER_MAX_MODULES 16 /* Largest number of asics per module */ #define VOYAGER_MAX_ASICS_PER_MODULE 7 /* the CAT asic of each module is always the first one */ #define VOYAGER_CAT_ID 0 #define VOYAGER_PSI 0x1a /* voyager instruction operations and registers */ #define VOYAGER_READ_CONFIG 0x1 #define VOYAGER_WRITE_CONFIG 0x2 #define VOYAGER_BYPASS 0xff typedef struct voyager_asic { __u8 asic_addr; /* ASIC address; Level 4 */ __u8 asic_type; /* ASIC type */ __u8 asic_id; /* ASIC id */ __u8 jtag_id[4]; /* JTAG id */ __u8 
asic_location; /* Location within scan path; start w/ 0 */ __u8 bit_location; /* Location within bit stream; start w/ 0 */ __u8 ireg_length; /* Instruction register length */ __u16 subaddr; /* Amount of sub address space */ struct voyager_asic *next; /* Next asic in linked list */ } voyager_asic_t; typedef struct voyager_module { __u8 module_addr; /* Module address */ __u8 scan_path_connected; /* Scan path connected */ __u16 ee_size; /* Size of the EEPROM */ __u16 num_asics; /* Number of Asics */ __u16 inst_bits; /* Instruction bits in the scan path */ __u16 largest_reg; /* Largest register in the scan path */ __u16 smallest_reg; /* Smallest register in the scan path */ voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ struct voyager_module *submodule; /* Submodule pointer */ struct voyager_module *next; /* Next module in linked list */ } voyager_module_t; typedef struct voyager_eeprom_hdr { __u8 module_id[4] __attribute__((packed)); __u8 version_id __attribute__((packed)); __u8 config_id __attribute__((packed)); __u16 boundry_id __attribute__((packed)); /* boundary scan id */ __u16 ee_size __attribute__((packed)); /* size of EEPROM */ __u8 assembly[11] __attribute__((packed)); /* assembly # */ __u8 assembly_rev __attribute__((packed)); /* assembly rev */ __u8 tracer[4] __attribute__((packed)); /* tracer number */ __u16 assembly_cksum __attribute__((packed)); /* asm checksum */ __u16 power_consump __attribute__((packed)); /* pwr requirements */ __u16 num_asics __attribute__((packed)); /* number of asics */ __u16 bist_time __attribute__((packed)); /* min. bist time */ __u16 err_log_offset __attribute__((packed)); /* error log offset */ __u16 scan_path_offset __attribute__((packed));/* scan path offset */ __u16 cct_offset __attribute__((packed)); __u16 log_length __attribute__((packed)); /* length of err log */ __u16 xsum_end __attribute__((packed)); /* offset to end of checksum */ __u8 reserved[4] __attribute__((packed)); __u8 sflag __attribute__((packed)); /* starting sentinal */ __u8 part_number[13] __attribute__((packed)); /* prom part number */ __u8 version[10] __attribute__((packed)); /* version number */ __u8 signature[8] __attribute__((packed)); __u16 eeprom_chksum __attribute__((packed)); __u32 data_stamp_offset __attribute__((packed)); __u8 eflag __attribute__((packed)); /* ending sentinal */ } voyager_eprom_hdr_t; #define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) #define VOYAGER_XSUM_END_OFFSET 0x2a /* the following three definitions are for internal table layouts * in the module EPROMs. 
We really only care about the IDs and * offsets */ typedef struct voyager_sp_table { __u8 asic_id __attribute__((packed)); __u8 bypass_flag __attribute__((packed)); __u16 asic_data_offset __attribute__((packed)); __u16 config_data_offset __attribute__((packed)); } voyager_sp_table_t; typedef struct voyager_jtag_table { __u8 icode[4] __attribute__((packed)); __u8 runbist[4] __attribute__((packed)); __u8 intest[4] __attribute__((packed)); __u8 samp_preld[4] __attribute__((packed)); __u8 ireg_len __attribute__((packed)); } voyager_jtt_t; typedef struct voyager_asic_data_table { __u8 jtag_id[4] __attribute__((packed)); __u16 length_bsr __attribute__((packed)); __u16 length_bist_reg __attribute__((packed)); __u32 bist_clk __attribute__((packed)); __u16 subaddr_bits __attribute__((packed)); __u16 seed_bits __attribute__((packed)); __u16 sig_bits __attribute__((packed)); __u16 jtag_offset __attribute__((packed)); } voyager_at_t; /* Voyager Interrupt Controller (VIC) registers */ /* Base to add to Cross Processor Interrupts (CPIs) when triggering * the CPU IRQ line */ /* register defines for the WCBICs (one per processor) */ #define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */ #define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */ #define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */ #define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */ #define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */ #define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */ #define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */ #define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */ /* top of memory registers */ #define VOYAGER_WCBIC_TOM_L 0x4 #define VOYAGER_WCBIC_TOM_H 0x5 /* register defines for Voyager Memory Contol (VMC) * these are present on L4 machines only */ #define VOYAGER_VMC1 0x81 #define VOYAGER_VMC2 0x91 #define VOYAGER_VMC3 0xa1 #define VOYAGER_VMC4 0xb1 /* VMC Ports */ #define VOYAGER_VMC_MEMORY_SETUP 0x9 # define VMC_Interleaving 0x01 # define VMC_4Way 0x02 # define VMC_EvenCacheLines 0x04 # define VMC_HighLine 0x08 # define VMC_Start0_Enable 0x20 # define VMC_Start1_Enable 0x40 # define VMC_Vremap 0x80 #define VOYAGER_VMC_BANK_DENSITY 0xa # define VMC_BANK_EMPTY 0 # define VMC_BANK_4MB 1 # define VMC_BANK_16MB 2 # define VMC_BANK_64MB 3 # define VMC_BANK0_MASK 0x03 # define VMC_BANK1_MASK 0x0C # define VMC_BANK2_MASK 0x30 # define VMC_BANK3_MASK 0xC0 /* Magellan Memory Controller (MMC) defines - present on L5 */ #define VOYAGER_MMC_ASIC_ID 1 /* the two memory modules corresponding to memory cards in the system */ #define VOYAGER_MMC_MEMORY0_MODULE 0x14 #define VOYAGER_MMC_MEMORY1_MODULE 0x15 /* the Magellan Memory Address (MMA) defines */ #define VOYAGER_MMA_ASIC_ID 2 /* Submodule number for the Quad Baseboard */ #define VOYAGER_QUAD_BASEBOARD 1 /* ASIC defines for the Quad Baseboard */ #define VOYAGER_QUAD_QDATA0 1 #define VOYAGER_QUAD_QDATA1 2 #define VOYAGER_QUAD_QABC 3 /* Useful areas in extended CMOS */ #define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a #define VOYAGER_MEMORY_CLICKMAP 0xa23 #define VOYAGER_DUMP_LOCATION 0xb1a /* SUS In Control bit - used to tell SUS that we don't need to be * babysat anymore */ #define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff # define VOYAGER_IN_CONTROL_FLAG 0x80 /* Voyager PSI defines */ #define VOYAGER_PSI_STATUS_REG 0x08 # define PSI_DC_FAIL 0x01 # define PSI_MON 0x02 # define PSI_FAULT 0x04 # define PSI_ALARM 0x08 # define PSI_CURRENT 0x10 # define PSI_DVM 0x20 # define PSI_PSCFAULT 0x40 # define PSI_STAT_CHG 0x80 #define 
VOYAGER_PSI_SUPPLY_REG 0x8000 /* read */ # define PSI_FAIL_DC 0x01 # define PSI_FAIL_AC 0x02 # define PSI_MON_INT 0x04 # define PSI_SWITCH_OFF 0x08 # define PSI_HX_OFF 0x10 # define PSI_SECURITY 0x20 # define PSI_CMOS_BATT_LOW 0x40 # define PSI_CMOS_BATT_FAIL 0x80 /* write */ # define PSI_CLR_SWITCH_OFF 0x13 # define PSI_CLR_HX_OFF 0x14 # define PSI_CLR_CMOS_BATT_FAIL 0x17 #define VOYAGER_PSI_MASK 0x8001 # define PSI_MASK_MASK 0x10 #define VOYAGER_PSI_AC_FAIL_REG 0x8004 #define AC_FAIL_STAT_CHANGE 0x80 #define VOYAGER_PSI_GENERAL_REG 0x8007 /* read */ # define PSI_SWITCH_ON 0x01 # define PSI_SWITCH_ENABLED 0x02 # define PSI_ALARM_ENABLED 0x08 # define PSI_SECURE_ENABLED 0x10 # define PSI_COLD_RESET 0x20 # define PSI_COLD_START 0x80 /* write */ # define PSI_POWER_DOWN 0x10 # define PSI_SWITCH_DISABLE 0x01 # define PSI_SWITCH_ENABLE 0x11 # define PSI_CLEAR 0x12 # define PSI_ALARM_DISABLE 0x03 # define PSI_ALARM_ENABLE 0x13 # define PSI_CLEAR_COLD_RESET 0x05 # define PSI_SET_COLD_RESET 0x15 # define PSI_CLEAR_COLD_START 0x07 # define PSI_SET_COLD_START 0x17 struct voyager_bios_info { __u8 len; __u8 major; __u8 minor; __u8 debug; __u8 num_classes; __u8 class_1; __u8 class_2; }; /* The following structures and definitions are for the Kernel/SUS * interface these are needed to find out how SUS initialised any Quad * boards in the system */ #define NUMBER_OF_MC_BUSSES 2 #define SLOTS_PER_MC_BUS 8 #define MAX_CPUS 16 /* 16 way CPU system */ #define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */ #define MAX_CACHE_LEVELS 4 /* # of cache levels supported */ #define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */ #define NUMBER_OF_POS_REGS 8 typedef struct { __u8 MC_Slot __attribute__((packed)); __u8 POS_Values[NUMBER_OF_POS_REGS] __attribute__((packed)); } MC_SlotInformation_t; struct QuadDescription { __u8 Type __attribute__((packed)); /* for type 0 (DYADIC or MONADIC) all fields * will be zero except for slot */ __u8 StructureVersion __attribute__((packed)); __u32 CPI_BaseAddress __attribute__((packed)); __u32 LARC_BankSize __attribute__((packed)); __u32 LocalMemoryStateBits __attribute__((packed)); __u8 Slot __attribute__((packed)); /* Processor slots 1 - 4 */ }; struct ProcBoardInfo { __u8 Type __attribute__((packed)); __u8 StructureVersion __attribute__((packed)); __u8 NumberOfBoards __attribute__((packed)); struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS] __attribute__((packed)); }; struct CacheDescription { __u8 Level __attribute__((packed)); __u32 TotalSize __attribute__((packed)); __u16 LineSize __attribute__((packed)); __u8 Associativity __attribute__((packed)); __u8 CacheType __attribute__((packed)); __u8 WriteType __attribute__((packed)); __u8 Number_CPUs_SharedBy __attribute__((packed)); __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS] __attribute__((packed)); }; struct CPU_Description { __u8 CPU_HardwareId __attribute__((packed)); char *FRU_String __attribute__((packed)); __u8 NumberOfCacheLevels __attribute__((packed)); struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS] __attribute__((packed)); }; struct CPU_Info { __u8 Type __attribute__((packed)); __u8 StructureVersion __attribute__((packed)); __u8 NumberOf_CPUs __attribute__((packed)); struct CPU_Description CPU_Data[MAX_CPUS] __attribute__((packed)); }; /* * This structure will be used by SUS and the OS. * The assumption about this structure is that no blank space is * packed in it by our friend the compiler. 
*/ typedef struct { __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ __u32 OS_Flags; /* Flags set by the OS as info for SUS */ __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ __u32 WatchDogCount; /* Updated by the OS on every tic. */ __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ /* All new SECOND_PASS_INTERFACE fields added from this point */ struct ProcBoardInfo *BoardData; struct CPU_Info *CPU_Data; /* All new fields must be added from this point */ } Voyager_KernelSUS_Mbox_t; /* structure for finding the right memory address to send a QIC CPI to */ struct voyager_qic_cpi { /* Each cache line (32 bytes) can trigger a cpi. The cpi * read/write may occur anywhere in the cache line---pick the * middle to be safe */ struct { __u32 pad1[3]; __u32 cpi; __u32 pad2[4]; } qic_cpi[8]; }; struct voyager_status { __u32 power_fail:1; __u32 switch_off:1; __u32 request_from_kernel:1; }; struct voyager_psi_regs { __u8 cat_id; __u8 cat_dev; __u8 cat_control; __u8 subaddr; __u8 dummy4; __u8 checkbit; __u8 subaddr_low; __u8 subaddr_high; __u8 intstatus; __u8 stat1; __u8 stat3; __u8 fault; __u8 tms; __u8 gen; __u8 sysconf; __u8 dummy15; }; struct voyager_psi_subregs { __u8 supply; __u8 mask; __u8 present; __u8 DCfail; __u8 ACfail; __u8 fail; __u8 UPSfail; __u8 genstatus; }; struct voyager_psi { struct voyager_psi_regs regs; struct voyager_psi_subregs subregs; }; struct voyager_SUS { #define VOYAGER_DUMP_BUTTON_NMI 0x1 #define VOYAGER_SUS_VALID 0x2 #define VOYAGER_SYSINT_COMPLETE 0x3 __u8 SUS_mbox; #define VOYAGER_NO_COMMAND 0x0 #define VOYAGER_IGNORE_DUMP 0x1 #define VOYAGER_DO_DUMP 0x2 #define VOYAGER_SYSINT_HANDSHAKE 0x3 #define VOYAGER_DO_MEM_DUMP 0x4 #define VOYAGER_SYSINT_WAS_RECOVERED 0x5 __u8 kernel_mbox; #define VOYAGER_MAILBOX_VERSION 0x10 __u8 SUS_version; __u8 kernel_version; #define VOYAGER_OS_HAS_SYSINT 0x1 #define VOYAGER_OS_IN_PROGRESS 0x2 #define VOYAGER_UPDATING_WDPERIOD 0x4 __u32 kernel_flags; #define VOYAGER_SUS_BOOTING 0x1 #define VOYAGER_SUS_IN_PROGRESS 0x2 __u32 SUS_flags; __u32 watchdog_period; __u32 watchdog_count; __u32 SUS_errorlog; /* lots of system configuration stuff under here */ }; /* Variables exported by voyager_smp */ extern __u32 voyager_extended_vic_processors; extern __u32 voyager_allowed_boot_processors; extern __u32 voyager_quad_processors; extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS]; extern struct voyager_SUS *voyager_SUS; /* variables exported always */ extern int voyager_level; extern int kvoyagerd_running; extern struct semaphore kvoyagerd_sem; extern struct voyager_status voyager_status; /* functions exported by the voyager and voyager_smp modules */ extern int voyager_cat_readb(__u8 module, __u8 asic, int reg); extern void voyager_cat_init(void); extern void voyager_detect(struct voyager_bios_info *); extern void voyager_trap_init(void); extern void voyager_setup_irqs(void); extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length); extern void voyager_smp_intr_init(void); 
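/*
 * Illustrative sketch, not part of the original header: one way the OS side
 * could report liveness to SUS through the shared mailbox described above
 * (WatchDogCount is "Updated by the OS on every tic").  It only uses the
 * voyager_SUS pointer and the watchdog_count field declared in this file;
 * the function name is hypothetical and exists purely as a usage example.
 */
static inline void voyager_example_watchdog_tick(void)
{
	/* Bump the count the DP watches so it can tell the OS is still alive. */
	if (voyager_SUS)
		voyager_SUS->watchdog_count++;
}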
extern __u8 voyager_extended_cmos_read(__u16 cmos_address); extern void voyager_smp_dump(void); extern void voyager_timer_interrupt(struct pt_regs *regs); extern void smp_local_timer_interrupt(struct pt_regs * regs); extern void voyager_power_off(void); extern void smp_voyager_power_off(void *dummy); extern void voyager_restart(void); extern void voyager_cat_power_off(void); extern void voyager_cat_do_common_interrupt(void); extern void voyager_handle_nmi(void); /* Commands for the following are */ #define VOYAGER_PSI_READ 0 #define VOYAGER_PSI_WRITE 1 #define VOYAGER_PSI_SUBREAD 2 #define VOYAGER_PSI_SUBWRITE 3 extern void voyager_cat_psi(__u8, __u16, __u8 *); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm-i386/xor.h000066400000000000000000000520551314037446600235600ustar00rootroot00000000000000/* * include/asm-i386/xor.h * * Optimized RAID-5 checksumming functions for MMX and SSE. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * High-speed RAID5 checksumming functions utilizing MMX instructions. * Copyright (C) 1998 Ingo Molnar. */ #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" #define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" #define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" #define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" #define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" #define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" #include static void xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ ST(i,0) \ XO1(i+1,1) \ ST(i+1,1) \ XO1(i+2,2) \ ST(i+2,2) \ XO1(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ ST(i,0) \ XO2(i+1,1) \ ST(i+1,1) \ XO2(i+2,2) \ ST(i+2,2) \ XO2(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl $128, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ ST(i,0) \ XO3(i+1,1) \ ST(i+1,1) \ XO3(i+2,2) \ ST(i+2,2) \ XO3(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl 
$128, %3 ;\n" " addl $128, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ ST(i,0) \ XO4(i+1,1) \ ST(i+1,1) \ XO4(i+2,2) \ ST(i+2,2) \ XO4(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl $128, %3 ;\n" " addl $128, %4 ;\n" " addl $128, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } #undef LD #undef XO1 #undef XO2 #undef XO3 #undef XO4 #undef ST #undef BLOCK static void xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " movq %%mm0, (%1) ;\n" " pxor 8(%2), %%mm1 ;\n" " movq 24(%1), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " pxor 16(%2), %%mm2 ;\n" " movq 32(%1), %%mm4 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 24(%2), %%mm3 ;\n" " movq 40(%1), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%2), %%mm4 ;\n" " movq 48(%1), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 40(%2), %%mm5 ;\n" " movq 56(%1), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%2), %%mm6 ;\n" " pxor 56(%2), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); kernel_fpu_end(); } static void xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor 8(%2), %%mm1 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 16(%2), %%mm2 ;\n" " movq %%mm0, (%1) ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor 16(%3), %%mm2 ;\n" " movq 24(%1), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " movq 32(%1), %%mm4 ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 24(%2), %%mm3 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 32(%2), %%mm4 ;\n" " pxor 24(%3), %%mm3 ;\n" " pxor 40(%2), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 40(%3), %%mm5 ;\n" " movq 48(%1), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " movq 56(%1), %%mm7 ;\n" " pxor 48(%2), %%mm6 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 56(%2), %%mm7 ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " 
movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : : "memory" ); kernel_fpu_end(); } static void xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor 8(%2), %%mm1 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 16(%2), %%mm2 ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor (%4), %%mm0 ;\n" " movq 24(%1), %%mm3 ;\n" " pxor 16(%3), %%mm2 ;\n" " pxor 8(%4), %%mm1 ;\n" " movq %%mm0, (%1) ;\n" " movq 32(%1), %%mm4 ;\n" " pxor 24(%2), %%mm3 ;\n" " pxor 16(%4), %%mm2 ;\n" " movq %%mm1, 8(%1) ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 32(%2), %%mm4 ;\n" " pxor 24(%3), %%mm3 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 40(%2), %%mm5 ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 24(%4), %%mm3 ;\n" " movq %%mm3, 24(%1) ;\n" " movq 56(%1), %%mm7 ;\n" " movq 48(%1), %%mm6 ;\n" " pxor 40(%3), %%mm5 ;\n" " pxor 32(%4), %%mm4 ;\n" " pxor 48(%2), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 56(%2), %%mm7 ;\n" " pxor 40(%4), %%mm5 ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%4), %%mm6 ;\n" " pxor 56(%4), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " addl $64, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory"); kernel_fpu_end(); } static void xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " pxor 8(%2), %%mm1 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor 16(%2), %%mm2 ;\n" " pxor (%4), %%mm0 ;\n" " pxor 8(%4), %%mm1 ;\n" " pxor 16(%3), %%mm2 ;\n" " movq 24(%1), %%mm3 ;\n" " pxor (%5), %%mm0 ;\n" " pxor 8(%5), %%mm1 ;\n" " movq %%mm0, (%1) ;\n" " pxor 16(%4), %%mm2 ;\n" " pxor 24(%2), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " pxor 16(%5), %%mm2 ;\n" " pxor 24(%3), %%mm3 ;\n" " movq 32(%1), %%mm4 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 24(%4), %%mm3 ;\n" " pxor 32(%2), %%mm4 ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 24(%5), %%mm3 ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 40(%2), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%4), %%mm4 ;\n" " pxor 40(%3), %%mm5 ;\n" " movq 48(%1), %%mm6 ;\n" " movq 56(%1), %%mm7 ;\n" " pxor 32(%5), %%mm4 ;\n" " pxor 40(%4), %%mm5 ;\n" " pxor 48(%2), %%mm6 ;\n" " pxor 56(%2), %%mm7 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " pxor 40(%5), %%mm5 ;\n" " pxor 48(%4), %%mm6 ;\n" " pxor 56(%4), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%5), %%mm6 ;\n" " pxor 56(%5), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " addl $64, %4 ;\n" " addl $64, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } static struct xor_block_template xor_block_pII_mmx = { .name = "pII_mmx", .do_2 = xor_pII_mmx_2, .do_3 = xor_pII_mmx_3, .do_4 = xor_pII_mmx_4, .do_5 = xor_pII_mmx_5, }; static struct xor_block_template xor_block_p5_mmx = { .name = "p5_mmx", .do_2 = xor_p5_mmx_2, .do_3 = xor_p5_mmx_3, .do_4 = xor_p5_mmx_4, .do_5 = xor_p5_mmx_5, }; /* * Cache avoiding checksumming functions utilizing KNI instructions * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ #define XMMS_SAVE do { \ preempt_disable(); \ cr0 = read_cr0(); \ clts(); \ __asm__ __volatile__ ( \ "movups %%xmm0,(%0) ;\n\t" \ "movups %%xmm1,0x10(%0) ;\n\t" \ "movups %%xmm2,0x20(%0) ;\n\t" \ "movups %%xmm3,0x30(%0) ;\n\t" \ : \ : "r" (xmm_save) \ : "memory"); \ } while(0) #define XMMS_RESTORE do { \ __asm__ __volatile__ ( \ "sfence ;\n\t" \ "movups (%0),%%xmm0 ;\n\t" \ "movups 0x10(%0),%%xmm1 ;\n\t" \ "movups 0x20(%0),%%xmm2 ;\n\t" \ "movups 0x30(%0),%%xmm3 ;\n\t" \ : \ : "r" (xmm_save) \ : "memory"); \ write_cr0(cr0); \ preempt_enable(); \ } while(0) #define ALIGN16 __attribute__((aligned(16))) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" #define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" #define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" #define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" #define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" #define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" #define XO5(x,y) " 
xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ PF1(i) \ PF1(i+2) \ LD(i+2,2) \ LD(i+3,3) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); XMMS_RESTORE; } static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r"(p2), "+r"(p3) : : "memory" ); XMMS_RESTORE; } static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ PF0(i+4) \ PF0(i+6) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " addl $256, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory" ); XMMS_RESTORE; } static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ PF4(i) \ PF4(i+2) \ PF0(i+4) \ PF0(i+6) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ XO4(i+1,1) \ XO4(i+2,2) \ XO4(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " addl $256, %4 ;\n" " addl $256, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); XMMS_RESTORE; } static struct xor_block_template xor_block_pIII_sse = { .name = "pIII_sse", .do_2 = xor_sse_2, .do_3 = xor_sse_3, .do_4 = xor_sse_4, .do_5 = xor_sse_5, }; /* Also try the generic routines. */ #include #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ xor_speed(&xor_block_32regs); \ xor_speed(&xor_block_32regs_p); \ if (cpu_has_xmm) \ xor_speed(&xor_block_pIII_sse); \ if (cpu_has_mmx) { \ xor_speed(&xor_block_pII_mmx); \ xor_speed(&xor_block_p5_mmx); \ } \ } while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ #define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/000077500000000000000000000000001314037446600221015ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/gnttab_dma.h000066400000000000000000000000561314037446600243530ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/hypercall.h000066400000000000000000000034561314037446600242450ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifdef CONFIG_X86_64 #include "hypercall_64.h" #else #include "hypercall_32.h" #endif #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/hypercall_32.h000066400000000000000000000234461314037446600245520ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_32_H__ #define __HYPERCALL_32_H__ #include /* memcpy() */ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #define __STR(x) #x #define STR(x) __STR(x) #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("STR(__HYPERVISOR_##name)" * 32),%%eax; " \ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ long __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ (type)__res; \ }) #define _hypercall1(type, name, a1) \ ({ \ long __res, __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ long __res, __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ long __res, __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ long __res, __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ (type)__res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ (type)__res; \ }) static inline int HYPERVISOR_set_trap_table( trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int HYPERVISOR_mmu_update( mmu_update_t *req, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int HYPERVISOR_mmuext_op( struct mmuext_op *op, int count, int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int HYPERVISOR_set_gdt( unsigned long *frame_list, int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long 
HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int HYPERVISOR_set_debugreg( int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long HYPERVISOR_get_debugreg( int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int HYPERVISOR_multicall( multicall_entry_t *call_list, int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int HYPERVISOR_console_io( int cmd, int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int HYPERVISOR_vcpu_op( int cmd, int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = 
SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } static inline unsigned long HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } static inline int HYPERVISOR_callback_op( int cmd, void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_32_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/hypercall_64.h000066400000000000000000000243461314037446600245570ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_64_H__ #define __HYPERCALL_64_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "add hypercall_stubs(%%rip),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ register long __arg5 asm("r8") = (long)(a5); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } 
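/*
 * Illustrative sketch, not part of the original header: a minimal caller of
 * the compat scheduler hypercall defined just above, yielding the current
 * VCPU.  With the 64-bit _hypercall2() wrapper the command is passed in %rdi
 * and the unused long argument in %rsi.  SCHEDOP_yield is assumed to be
 * visible from xen/interface/sched.h (pulled in via hypervisor.h); the
 * function name is hypothetical and serves only as a usage example.
 */
static inline int __must_check xen_example_yield_compat(void)
{
	/* SCHEDOP_yield gives up the VCPU; the compat form takes a plain long. */
	return HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
}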
static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); 
} static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_64_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/hypervisor.h000066400000000000000000000000571314037446600244660ustar00rootroot00000000000000 #include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/maddr.h000066400000000000000000000114541314037446600233460ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. 
Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/asm/synch_bitops.h000066400000000000000000000000611314037446600247530ustar00rootroot00000000000000 #include UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/000077500000000000000000000000001314037446600221135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/balloon.h000066400000000000000000000046071314037446600237210ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
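 *
 * Illustrative usage sketch (not part of the original header), based on the
 * balloon_lock()/balloon_unlock() macros declared below: a driver that must
 * not race with the balloon changing the memory reservation brackets its
 * critical region like
 *
 *     unsigned long flags;
 *     balloon_lock(flags);
 *     ... critical region touching the memory reservation ...
 *     balloon_unlock(flags);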
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_BALLOON_H__ #define __ASM_BALLOON_H__ /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif /* __ASM_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/blkif.h000066400000000000000000000112031314037446600233500ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. 
Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/compat_ioctl.h000066400000000000000000000030411314037446600247370ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 2007 * * Authors: Jimi Xenidis * Hollis Blanchard */ #ifndef __LINUX_XEN_COMPAT_H__ #define __LINUX_XEN_COMPAT_H__ #include extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg); struct privcmd_mmap_32 { int num; domid_t dom; compat_uptr_t entry; }; struct privcmd_mmapbatch_32 { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ compat_uptr_t arr; /* array of mfns - top nibble set on err */ }; #define IOCTL_PRIVCMD_MMAP_32 \ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32)) #define IOCTL_PRIVCMD_MMAPBATCH_32 \ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32)) #endif /* __LINUX_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/cpu_hotplug.h000066400000000000000000000014751314037446600246240ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/driver_util.h000066400000000000000000000005511314037446600246150ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(unsigned long size); extern void free_vm_area(struct vm_struct *area); extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/evtchn.h000066400000000000000000000114511314037446600235550ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. 
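 *
 * Illustrative usage sketch (not part of the original header), based on the
 * prototypes declared below: a driver binds a VIRQ to an IRQ-like handler
 * and later tears the binding down again, e.g.
 *
 *     static irqreturn_t my_handler(int irq, void *dev_id,
 *                                   struct pt_regs *regs)
 *     {
 *         return IRQ_HANDLED;
 *     }
 *
 *     int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, my_handler,
 *                                       0, "my-driver", NULL);
 *     if (irq >= 0)
 *         ...
 *     unbind_from_irqhandler(irq, NULL);
 *
 * "my_handler", "my-driver" and the choice of VIRQ_DEBUG are placeholders
 * for this sketch only.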
* * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. */ int bind_caller_port_to_irqhandler( unsigned int caller_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/features.h000066400000000000000000000007041314037446600241030ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __ASM_XEN_FEATURES_H__ #define __ASM_XEN_FEATURES_H__ #include extern void setup_xen_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; #define xen_feature(flag) (xen_features[flag]) #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/firmware.h000066400000000000000000000003011314037446600240720ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) void copy_edd(void); #endif void copy_edid(void); #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/gnttab.h000066400000000000000000000124361314037446600235510ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); int gnttab_suspend(void); int gnttab_resume(void); void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { 
unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/hvm.h000066400000000000000000000007101314037446600230540ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/hypercall.h000066400000000000000000000013751314037446600242550ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/hypervisor_sysfs.h000066400000000000000000000015051314037446600257260ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/000077500000000000000000000000001314037446600240535ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/COPYING000066400000000000000000000032641314037446600251130ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. 
-- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64.h000066400000000000000000000511341314037446600257060ustar00rootroot00000000000000/****************************************************************************** * arch-ia64/hypervisor-if.h * * Guest OS interface to IA64 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include "xen.h" #ifndef __HYPERVISOR_IF_IA64_H__ #define __HYPERVISOR_IF_IA64_H__ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." #endif /* Structural guest handles introduced in 0x00030201. 
*/ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #define uint64_aligned_t uint64_t #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* Arch specific VIRQs definition */ #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ /* Maximum number of virtual CPUs in multi-processor guests. */ /* WARNING: before changing this, check that shared_info fits on a page */ #define MAX_VIRT_CPUS 64 /* IO ports location for PV. */ #define IO_PORTS_PADDR 0x00000ffffc000000UL #define IO_PORTS_SIZE 0x0000000004000000UL #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; #ifdef __XEN_TOOLS__ #define XEN_PAGE_SIZE XC_PAGE_SIZE #else #define XEN_PAGE_SIZE PAGE_SIZE #endif #define INVALID_MFN (~0UL) struct pt_fpreg { union { unsigned long bits[2]; long double __dummy; /* force 16-byte alignment */ } u; }; union vac { unsigned long value; struct { int a_int:1; int a_from_int_cr:1; int a_to_int_cr:1; int a_from_psr:1; int a_from_cpuid:1; int a_cover:1; int a_bsw:1; long reserved:57; }; }; typedef union vac vac_t; union vdc { unsigned long value; struct { int d_vmsw:1; int d_extint:1; int d_ibr_dbr:1; int d_pmc:1; int d_to_pmd:1; int d_itm:1; long reserved:58; }; }; typedef union vdc vdc_t; struct mapped_regs { union vac vac; union vdc vdc; unsigned long virt_env_vaddr; unsigned long reserved1[29]; unsigned long vhpi; unsigned long reserved2[95]; union { unsigned long vgr[16]; unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active }; union { unsigned long vbgr[16]; unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active }; unsigned long vnat; unsigned long vbnat; unsigned long vcpuid[5]; unsigned long reserved3[11]; unsigned long vpsr; unsigned long vpr; unsigned long reserved4[76]; union { unsigned long vcr[128]; struct { unsigned long dcr; // CR0 unsigned long itm; unsigned long iva; unsigned long rsv1[5]; unsigned long pta; // CR8 unsigned long rsv2[7]; unsigned long ipsr; // CR16 unsigned long isr; unsigned long rsv3; unsigned long iip; unsigned long ifa; unsigned long itir; unsigned long iipa; unsigned long ifs; unsigned long iim; // CR24 unsigned long iha; unsigned long rsv4[38]; unsigned long lid; // CR64 unsigned long ivr; unsigned long tpr; unsigned long eoi; unsigned long irr[4]; unsigned long itv; // CR72 unsigned long pmv; unsigned long cmcv; unsigned long rsv5[5]; unsigned long lrr0; // CR80 unsigned long lrr1; unsigned long rsv6[46]; }; }; union { unsigned long reserved5[128]; struct { unsigned long precover_ifs; unsigned long unat; // not sure if this is needed until NaT arch is done int interrupt_collection_enabled; // virtual psr.ic /* virtual interrupt deliverable flag is evtchn_upcall_mask in * shared info area now. 
interrupt_mask_addr is the address * of evtchn_upcall_mask for current vcpu */ unsigned char *interrupt_mask_addr; int pending_interruption; unsigned char vpsr_pp; unsigned char vpsr_dfh; unsigned char hpsr_dfh; unsigned char hpsr_mfh; unsigned long reserved5_1[4]; int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual int banknum; // 0 or 1, which virtual register bank is active unsigned long rrs[8]; // region registers unsigned long krs[8]; // kernel registers unsigned long tmp[16]; // temp registers (e.g. for hyperprivops) /* itc paravirtualization * vAR.ITC = mAR.ITC + itc_offset * itc_last is one which was lastly passed to * the guest OS in order to prevent it from * going backwords. */ unsigned long itc_offset; unsigned long itc_last; }; }; }; typedef struct mapped_regs mapped_regs_t; struct vpd { struct mapped_regs vpd_low; unsigned long reserved6[3456]; unsigned long vmm_avail[128]; unsigned long reserved7[4096]; }; typedef struct vpd vpd_t; struct arch_vcpu_info { }; typedef struct arch_vcpu_info arch_vcpu_info_t; /* * This structure is used for magic page in domain pseudo physical address * space and the result of XENMEM_machine_memory_map. * As the XENMEM_machine_memory_map result, * xen_memory_map::nr_entries indicates the size in bytes * including struct xen_ia64_memmap_info. Not the number of entries. */ struct xen_ia64_memmap_info { uint64_t efi_memmap_size; /* size of EFI memory map */ uint64_t efi_memdesc_size; /* size of an EFI memory map descriptor */ uint32_t efi_memdesc_version; /* memory descriptor version */ void *memdesc[0]; /* array of efi_memory_desc_t */ }; typedef struct xen_ia64_memmap_info xen_ia64_memmap_info_t; struct arch_shared_info { /* PFN of the start_info page. */ unsigned long start_info_pfn; /* Interrupt vector for event channel. */ int evtchn_vector; /* PFN of memmap_info page */ unsigned int memmap_info_num_pages;/* currently only = 1 case is supported. 
*/ unsigned long memmap_info_pfn; uint64_t pad[31]; }; typedef struct arch_shared_info arch_shared_info_t; typedef unsigned long xen_callback_t; struct ia64_tr_entry { unsigned long pte; unsigned long itir; unsigned long vadr; unsigned long rid; }; typedef struct ia64_tr_entry ia64_tr_entry_t; DEFINE_XEN_GUEST_HANDLE(ia64_tr_entry_t); struct vcpu_tr_regs { struct ia64_tr_entry itrs[12]; struct ia64_tr_entry dtrs[12]; }; union vcpu_ar_regs { unsigned long ar[128]; struct { unsigned long kr[8]; unsigned long rsv1[8]; unsigned long rsc; unsigned long bsp; unsigned long bspstore; unsigned long rnat; unsigned long rsv2; unsigned long fcr; unsigned long rsv3[2]; unsigned long eflag; unsigned long csd; unsigned long ssd; unsigned long cflg; unsigned long fsr; unsigned long fir; unsigned long fdr; unsigned long rsv4; unsigned long ccv; /* 32 */ unsigned long rsv5[3]; unsigned long unat; unsigned long rsv6[3]; unsigned long fpsr; unsigned long rsv7[3]; unsigned long itc; unsigned long rsv8[3]; unsigned long ign1[16]; unsigned long pfs; /* 64 */ unsigned long lc; unsigned long ec; unsigned long rsv9[45]; unsigned long ign2[16]; }; }; union vcpu_cr_regs { unsigned long cr[128]; struct { unsigned long dcr; // CR0 unsigned long itm; unsigned long iva; unsigned long rsv1[5]; unsigned long pta; // CR8 unsigned long rsv2[7]; unsigned long ipsr; // CR16 unsigned long isr; unsigned long rsv3; unsigned long iip; unsigned long ifa; unsigned long itir; unsigned long iipa; unsigned long ifs; unsigned long iim; // CR24 unsigned long iha; unsigned long rsv4[38]; unsigned long lid; // CR64 unsigned long ivr; unsigned long tpr; unsigned long eoi; unsigned long irr[4]; unsigned long itv; // CR72 unsigned long pmv; unsigned long cmcv; unsigned long rsv5[5]; unsigned long lrr0; // CR80 unsigned long lrr1; unsigned long rsv6[46]; }; }; struct vcpu_guest_context_regs { unsigned long r[32]; unsigned long b[8]; unsigned long bank[16]; unsigned long ip; unsigned long psr; unsigned long cfm; unsigned long pr; unsigned int nats; /* NaT bits for r1-r31. */ unsigned int bnats; /* Nat bits for banked registers. */ union vcpu_ar_regs ar; union vcpu_cr_regs cr; struct pt_fpreg f[128]; unsigned long dbr[8]; unsigned long ibr[8]; unsigned long rr[8]; unsigned long pkr[16]; /* FIXME: cpuid,pmd,pmc */ unsigned long xip; unsigned long xpsr; unsigned long xfs; unsigned long xr[4]; struct vcpu_tr_regs tr; /* Physical registers in case of debug event. */ unsigned long excp_iipa; unsigned long excp_ifa; unsigned long excp_isr; unsigned int excp_vector; /* * The rbs is intended to be the image of the stacked registers still * in the cpu (not yet stored in memory). It is laid out as if it * were written in memory at a 512 (64*8) aligned address + offset. * rbs_voff is (offset / 8). rbs_nat contains NaT bits for the * remaining rbs registers. rbs_rnat contains NaT bits for in memory * rbs registers. * Note: loadrs is 2**14 bytes == 2**11 slots. */ unsigned int rbs_voff; unsigned long rbs[2048]; unsigned long rbs_rnat; /* * RSE.N_STACKED_PHYS via PAL_RSE_INFO * Strictly this isn't cpu context, but this value is necessary * for domain save/restore. So is here. */ unsigned long num_phys_stacked; }; struct vcpu_guest_context { #define VGCF_EXTRA_REGS (1UL << 1) /* Set extra regs. */ #define VGCF_SET_CR_IRR (1UL << 2) /* Set cr_irr[0:3]. */ #define VGCF_online (1UL << 3) /* make this vcpu online */ #define VGCF_SET_AR_ITC (1UL << 4) /* set pv ar.itc. 
itc_offset, itc_last */ unsigned long flags; /* VGCF_* flags */ struct vcpu_guest_context_regs regs; unsigned long event_callback_ip; /* xen doesn't share privregs pages with hvm domain so that this member * doesn't make sense for hvm domain. * ~0UL is already used for INVALID_P2M_ENTRY. */ #define VGC_PRIVREGS_HVM (~(-2UL)) unsigned long privregs_pfn; }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); /* dom0 vp op */ #define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0 /* Map io space in machine address to dom0 physical address space. Currently physical assigned address equals to machine address. */ #define IA64_DOM0VP_ioremap 0 /* Convert a pseudo physical page frame number to the corresponding machine page frame number. If no page is assigned, INVALID_MFN or GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */ #define IA64_DOM0VP_phystomach 1 /* Convert a machine page frame number to the corresponding pseudo physical page frame number of the caller domain. */ #define IA64_DOM0VP_machtophys 3 /* Reserved for future use. */ #define IA64_DOM0VP_iounmap 4 /* Unmap and free pages contained in the specified pseudo physical region. */ #define IA64_DOM0VP_zap_physmap 5 /* Assign machine page frame to dom0's pseudo physical address space. */ #define IA64_DOM0VP_add_physmap 6 /* expose the p2m table into domain */ #define IA64_DOM0VP_expose_p2m 7 /* xen perfmon */ #define IA64_DOM0VP_perfmon 8 /* gmfn version of IA64_DOM0VP_add_physmap */ #define IA64_DOM0VP_add_physmap_with_gmfn 9 /* get fpswa revision */ #define IA64_DOM0VP_fpswa_revision 10 /* Add an I/O port space range */ #define IA64_DOM0VP_add_io_space 11 /* expose the foreign domain's p2m table into privileged domain */ #define IA64_DOM0VP_expose_foreign_p2m 12 #define IA64_DOM0VP_EFP_ALLOC_PTE 0x1 /* allocate p2m table */ /* unexpose the foreign domain's p2m table into privileged domain */ #define IA64_DOM0VP_unexpose_foreign_p2m 13 /* get memmap_info and memmap. It is possible to map the page directly by foreign page mapping, but there is a race between writer. This hypercall avoids such race. */ #define IA64_DOM0VP_get_memmap 14 // flags for page assignement to pseudo physical address space #define _ASSIGN_readonly 0 #define ASSIGN_readonly (1UL << _ASSIGN_readonly) #define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag /* Internal only: memory attribute must be WC/UC/UCE. */ #define _ASSIGN_nocache 1 #define ASSIGN_nocache (1UL << _ASSIGN_nocache) // tlb tracking #define _ASSIGN_tlb_track 2 #define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track) /* Internal only: associated with PGC_allocated bit */ #define _ASSIGN_pgc_allocated 3 #define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated) /* Page is an IO page. */ #define _ASSIGN_io 4 #define ASSIGN_io (1UL << _ASSIGN_io) /* This structure has the same layout of struct ia64_boot_param, defined in . It is redefined here to ease use. */ struct xen_ia64_boot_param { unsigned long command_line; /* physical address of cmd line args */ unsigned long efi_systab; /* physical address of EFI system table */ unsigned long efi_memmap; /* physical address of EFI memory map */ unsigned long efi_memmap_size; /* size of EFI memory map */ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ unsigned int efi_memdesc_version; /* memory descriptor version */ struct { unsigned short num_cols; /* number of columns on console. */ unsigned short num_rows; /* number of rows on console. 
*/ unsigned short orig_x; /* cursor's x position */ unsigned short orig_y; /* cursor's y position */ } console_info; unsigned long fpswa; /* physical address of the fpswa interface */ unsigned long initrd_start; unsigned long initrd_size; unsigned long domain_start; /* va where the boot time domain begins */ unsigned long domain_size; /* how big is the boot domain */ }; #endif /* !__ASSEMBLY__ */ /* Size of the shared_info area (this is not related to page size). */ #define XSI_SHIFT 14 #define XSI_SIZE (1 << XSI_SHIFT) /* Log size of mapped_regs area (64 KB - only 4KB is used). */ #define XMAPPEDREGS_SHIFT 12 #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ #define XMAPPEDREGS_OFS XSI_SIZE /* Hyperprivops. */ #define HYPERPRIVOP_START 0x1 #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) #define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) #define HYPERPRIVOP_MAX (0x1a) /* Fast and light hypercalls. */ #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 /* Extra debug features. */ #define __HYPERVISOR_ia64_debug_op __HYPERVISOR_arch_2 /* Xencomm macros. */ #define XENCOMM_INLINE_MASK 0xf800000000000000UL #define XENCOMM_INLINE_FLAG 0x8000000000000000UL #ifndef __ASSEMBLY__ /* * Optimization features. * The hypervisor may do some special optimizations for guests. This hypercall * can be used to switch on/of these special optimizations. */ #define __HYPERVISOR_opt_feature 0x700UL #define XEN_IA64_OPTF_OFF 0x0 #define XEN_IA64_OPTF_ON 0x1 /* * If this feature is switched on, the hypervisor inserts the * tlb entries without calling the guests traphandler. * This is useful in guests using region 7 for identity mapping * like the linux kernel does. */ #define XEN_IA64_OPTF_IDENT_MAP_REG7 1 /* Identity mapping of region 4 addresses in HVM. */ #define XEN_IA64_OPTF_IDENT_MAP_REG4 2 /* Identity mapping of region 5 addresses in HVM. */ #define XEN_IA64_OPTF_IDENT_MAP_REG5 3 #define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) struct xen_ia64_opt_feature { unsigned long cmd; /* Which feature */ unsigned char on; /* Switch feature on/off */ union { struct { /* The page protection bit mask of the pte. * This will be or'ed with the pte. 
*/ unsigned long pgprot; unsigned long key; /* A protection key for itir. */ }; }; }; #endif /* __ASSEMBLY__ */ /* xen perfmon */ #ifdef XEN #ifndef __ASSEMBLY__ #ifndef _ASM_IA64_PERFMON_H #include // asm/perfmon.h requires struct list_head #include // for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t #endif /* _ASM_IA64_PERFMON_H */ DEFINE_XEN_GUEST_HANDLE(pfarg_features_t); DEFINE_XEN_GUEST_HANDLE(pfarg_context_t); DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t); DEFINE_XEN_GUEST_HANDLE(pfarg_load_t); #endif /* __ASSEMBLY__ */ #endif /* XEN */ #ifndef __ASSEMBLY__ #include "arch-ia64/hvm/memmap.h" #endif #endif /* __HYPERVISOR_IF_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/000077500000000000000000000000001314037446600255315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/debug_op.h000066400000000000000000000061201314037446600274650ustar00rootroot00000000000000/****************************************************************************** * debug_op.h * * Copyright (c) 2007 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_IA64_DEBUG_OP_H__ #define __XEN_PUBLIC_IA64_DEBUG_OP_H__ /* Set/Get extra conditions to break. */ #define XEN_IA64_DEBUG_OP_SET_FLAGS 1 #define XEN_IA64_DEBUG_OP_GET_FLAGS 2 /* Break on kernel single step. */ #define XEN_IA64_DEBUG_ON_KERN_SSTEP (1 << 0) /* Break on kernel debug (breakpoint or watch point). */ #define XEN_IA64_DEBUG_ON_KERN_DEBUG (1 << 1) /* Break on kernel taken branch. */ #define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2) /* Break on interrupt injection. */ #define XEN_IA64_DEBUG_ON_EXTINT (1 << 3) /* Break on interrupt injection. */ #define XEN_IA64_DEBUG_ON_EXCEPT (1 << 4) /* Break on event injection. */ #define XEN_IA64_DEBUG_ON_EVENT (1 << 5) /* Break on privop/virtualized instruction (slow path only). */ #define XEN_IA64_DEBUG_ON_PRIVOP (1 << 6) /* Break on emulated PAL call (at entry). */ #define XEN_IA64_DEBUG_ON_PAL (1 << 7) /* Break on emulated SAL call (at entry). */ #define XEN_IA64_DEBUG_ON_SAL (1 << 8) /* Break on emulated EFI call (at entry). */ #define XEN_IA64_DEBUG_ON_EFI (1 << 9) /* Break on rfi emulation (slow path only, before exec). */ #define XEN_IA64_DEBUG_ON_RFI (1 << 10) /* Break on address translation switch. */ #define XEN_IA64_DEBUG_ON_MMU (1 << 11) /* Break on bad guest physical address. */ #define XEN_IA64_DEBUG_ON_BAD_MPA (1 << 12) /* Force psr.ss bit. */ #define XEN_IA64_DEBUG_FORCE_SS (1 << 13) /* Force psr.db bit. */ #define XEN_IA64_DEBUG_FORCE_DB (1 << 14) /* Break on ITR/PTR. */ #define XEN_IA64_DEBUG_ON_TR (1 << 15) /* Break on ITC/PTC.L/PTC.G/PTC.GA. 
*/ #define XEN_IA64_DEBUG_ON_TC (1 << 16) /* Get translation cache. */ #define XEN_IA64_DEBUG_OP_GET_TC 3 /* Translate virtual address to guest physical address. */ #define XEN_IA64_DEBUG_OP_TRANSLATE 4 union xen_ia64_debug_op { uint64_t flags; struct xen_ia64_debug_vtlb { uint64_t nbr; /* IN/OUT */ XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr; /* IN/OUT */ } vtlb; }; typedef union xen_ia64_debug_op xen_ia64_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t); #endif /* __XEN_PUBLIC_IA64_DEBUG_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/hvm/000077500000000000000000000000001314037446600263235ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/hvm/memmap.h000066400000000000000000000052301314037446600277500ustar00rootroot00000000000000/****************************************************************************** * memmap.h * * Copyright (c) 2008 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ #define __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ #define MEM_G (1UL << 30) #define MEM_M (1UL << 20) #define MEM_K (1UL << 10) /* Guest physical address of IO ports space. 
*/ #define MMIO_START (3 * MEM_G) #define MMIO_SIZE (512 * MEM_M) #define VGA_IO_START 0xA0000UL #define VGA_IO_SIZE 0x20000 #define LEGACY_IO_START (MMIO_START + MMIO_SIZE) #define LEGACY_IO_SIZE (64 * MEM_M) #define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE) #define IO_PAGE_SIZE XEN_PAGE_SIZE #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE) #define STORE_PAGE_SIZE XEN_PAGE_SIZE #define BUFFER_IO_PAGE_START (STORE_PAGE_START + STORE_PAGE_SIZE) #define BUFFER_IO_PAGE_SIZE XEN_PAGE_SIZE #define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START + BUFFER_IO_PAGE_SIZE) #define BUFFER_PIO_PAGE_SIZE XEN_PAGE_SIZE #define IO_SAPIC_START 0xfec00000UL #define IO_SAPIC_SIZE 0x100000 #define PIB_START 0xfee00000UL #define PIB_SIZE 0x200000 #define GFW_START (4 * MEM_G - 16 * MEM_M) #define GFW_SIZE (16 * MEM_M) /* domVTI */ #define GPFN_FRAME_BUFFER 0x1 /* VGA framebuffer */ #define GPFN_LOW_MMIO 0x2 /* Low MMIO range */ #define GPFN_PIB 0x3 /* PIB base */ #define GPFN_IOSAPIC 0x4 /* IOSAPIC base */ #define GPFN_LEGACY_IO 0x5 /* Legacy I/O base */ #define GPFN_HIGH_MMIO 0x6 /* High MMIO range */ /* Nvram belongs to GFW memory space */ #define NVRAM_SIZE (MEM_K * 64) #define NVRAM_START (GFW_START + 10 * MEM_M) #define NVRAM_VALID_SIG 0x4650494e45584948 /* "HIXENIPF" */ struct nvram_save_addr { unsigned long addr; unsigned long signature; }; #endif /* __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/hvm/save.h000066400000000000000000000130561314037446600274370ustar00rootroot00000000000000/****************************************************************************** * save_types.h * * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_HVM_SAVE_IA64_H__ #define __XEN_PUBLIC_HVM_SAVE_IA64_H__ #include "../../hvm/save.h" #include "../../arch-ia64.h" /* * Save/restore header: general info about the save file. 
*/ /* x86 uses 0x54381286 */ #define HVM_FILE_MAGIC 0x343641492f6e6558UL /* "Xen/IA64" */ #define HVM_FILE_VERSION 0x0000000000000001UL struct hvm_save_header { uint64_t magic; /* Must be HVM_FILE_MAGIC */ uint64_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint64_t cpuid[5]; /* CPUID[0x01][%eax] on the saving machine */ }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * CPU */ struct hvm_hw_ia64_cpu { uint64_t ipsr; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu); /* * CPU */ struct hvm_hw_ia64_vpd { struct vpd vpd; }; DECLARE_HVM_SAVE_TYPE(VPD, 3, struct hvm_hw_ia64_vpd); /* * device dependency * vacpi => viosapic => vlsapic */ /* * vlsapic */ struct hvm_hw_ia64_vlsapic { uint64_t insvc[4]; uint64_t vhpi; // ??? should this be saved in vpd uint8_t xtp; uint8_t pal_init_pending; uint8_t pad[2]; }; DECLARE_HVM_SAVE_TYPE(VLSAPIC, 4, struct hvm_hw_ia64_vlsapic); /* set * unconditionaly set v->arch.irq_new_peding = 1 * unconditionaly set v->arch.irq_new_condition = 0 */ /* * vtime */ /* itc, itm, itv are saved by arch vcpu context */ struct hvm_hw_ia64_vtime { uint64_t itc; uint64_t itm; uint64_t last_itc; uint64_t pending; }; DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct hvm_hw_ia64_vtime); /* * calculate v->vtm.vtm_offset * ??? Or should vtm_offset be set by leave_hypervisor_tail()? * start vtm_timer if necessary by vtm_set_itm(). * ??? Or should vtm_timer be set by leave_hypervisor_tail()? * * ??? or should be done by schedule_tail() * => schedule_tail() should do. */ /* * viosapic */ #define VIOSAPIC_NUM_PINS 48 /* To share VT-d code which uses vioapic_redir_entry. * Although on ia64 this is for vsapic, but we have to vioapic_redir_entry * instead of viosapic_redir_entry. */ union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode : 3; uint8_t reserve1 : 1; uint8_t delivery_status: 1; uint8_t polarity : 1; uint8_t reserve2 : 1; uint8_t trig_mode : 1; uint8_t mask : 1; uint8_t reserve3 : 7; uint8_t reserved[3]; uint16_t dest_id; } fields; }; struct hvm_hw_ia64_viosapic { uint64_t irr; uint64_t isr; uint32_t ioregsel; uint32_t pad; uint64_t lowest_vcpu_id; uint64_t base_address; union vioapic_redir_entry redirtbl[VIOSAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic); /* * vacpi * PM timer */ struct vacpi_regs { union { struct { uint32_t pm1a_sts:16;/* PM1a_EVT_BLK.PM1a_STS: status register */ uint32_t pm1a_en:16; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; uint32_t evt_blk; }; uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ }; struct hvm_hw_ia64_vacpi { struct vacpi_regs regs; }; DECLARE_HVM_SAVE_TYPE(VACPI, 7, struct hvm_hw_ia64_vacpi); /* update last_gtime and setup timer of struct vacpi */ /* * opt_feature: identity mapping of region 4, 5 and 7. * With the c/s 16396:d2935f9c217f of xen-ia64-devel.hg, * opt_feature hypercall supports only region 4,5,7 identity mappings. * structure hvm_hw_ia64_identity_mappings only supports them. * The new structure, struct hvm_hw_ia64_identity_mappings, is created to * avoid to keep up with change of the xen/ia64 internal structure, struct * opt_feature. * * If it is enhanced in the future, new structure will be created. */ struct hvm_hw_ia64_identity_mapping { uint64_t on; /* on/off */ uint64_t pgprot; /* The page protection bit mask of the pte. */ uint64_t key; /* A protection key. 
*/ }; struct hvm_hw_ia64_identity_mappings { struct hvm_hw_ia64_identity_mapping im_reg4;/* Region 4 identity mapping */ struct hvm_hw_ia64_identity_mapping im_reg5;/* Region 5 identity mapping */ struct hvm_hw_ia64_identity_mapping im_reg7;/* Region 7 identity mapping */ }; DECLARE_HVM_SAVE_TYPE(OPT_FEATURE_IDENTITY_MAPPINGS, 8, struct hvm_hw_ia64_identity_mappings); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 8 #endif /* __XEN_PUBLIC_HVM_SAVE_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-ia64/sioemu.h000066400000000000000000000050161314037446600272050ustar00rootroot00000000000000/****************************************************************************** * sioemu.h * * Copyright (c) 2008 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_IA64_SIOEMU_H__ #define __XEN_PUBLIC_IA64_SIOEMU_H__ /* SIOEMU specific hypercalls. The numbers are the minor part of FW_HYPERCALL_SIOEMU. */ /* Defines the callback entry point. r8=ip, r9=data. Must be called per-vcpu. */ #define SIOEMU_HYPERCALL_SET_CALLBACK 0x01 /* Finish sioemu fw initialization and start firmware. r8=ip. */ #define SIOEMU_HYPERCALL_START_FW 0x02 /* Add IO pages in physmap. */ #define SIOEMU_HYPERCALL_ADD_IO_PHYSMAP 0x03 /* Get wallclock time. */ #define SIOEMU_HYPERCALL_GET_TIME 0x04 /* Flush cache. */ #define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07 /* Get freq base. */ #define SIOEMU_HYPERCALL_FREQ_BASE 0x08 /* Return from callback. */ #define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09 /* Deliver an interrupt. */ #define SIOEMU_HYPERCALL_DELIVER_INT 0x0a /* SIOEMU callback reason. */ /* An event (from event channel) has to be delivered. */ #define SIOEMU_CB_EVENT 0x00 /* Emulate an IO access. */ #define SIOEMU_CB_IO_EMULATE 0x01 /* An IPI is sent to a dead vcpu. */ #define SIOEMU_CB_WAKEUP_VCPU 0x02 /* A SAL hypercall is executed. */ #define SIOEMU_CB_SAL_ASSIST 0x03 #ifndef __ASSEMBLY__ struct sioemu_callback_info { /* Saved registers. */ unsigned long ip; unsigned long psr; unsigned long ifs; unsigned long nats; unsigned long r8; unsigned long r9; unsigned long r10; unsigned long r11; /* Callback parameters. 
*/ unsigned long cause; unsigned long arg0; unsigned long arg1; unsigned long arg2; unsigned long arg3; unsigned long _pad2[2]; unsigned long r2; }; #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_IA64_SIOEMU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/000077500000000000000000000000001314037446600254135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/cpuid.h000066400000000000000000000050721314037446600266740ustar00rootroot00000000000000/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/hvm/000077500000000000000000000000001314037446600262055ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/hvm/save.h000066400000000000000000000247501314037446600273240ustar00rootroot00000000000000/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. */ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint32_t sysenter_cs; uint32_t padding0; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. 
*/ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { uint8_t data[1024]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. 
*/ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * Viridian hypervisor context. */ struct hvm_viridian_context { uint64_t hypercall_gpa; uint64_t guest_os_id; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 15 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/xen-mca.h000066400000000000000000000305371314037446600271240ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc002 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. 
* Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint32_t mc_apicid; uint16_t mc_core_threadid; /* core thread of physical core */ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint64_t mc_gstatus; /* global status */ uint32_t mc_flags; }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) including all gp registers * and E(R)DI, E(R)BP, E(R)SP, E(R)FLAGS, E(R)IP, E(R)MISC, only 10 * of them might be useful. So expend this array to 10. */ struct mcinfo_msr mc_msr[10]; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ typedef struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; } xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. 
* We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ (struct mcinfo_common *)((_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. */ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ unsigned int mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. 
count-1 in array are valid */ uint32_t mcinj_pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; typedef union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } xen_mc_arg_t; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ xen_mc_arg_t u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/xen-x86_32.h000066400000000000000000000145401314037446600273110ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
*/ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. 
*/ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/xen-x86_64.h000066400000000000000000000157351314037446600273250ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
*/ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. * All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). 
*/ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86/xen.h000066400000000000000000000170471314037446600263670ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "../xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. 
*/ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 /* Machine check support */ #include "xen-mca.h" #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86_32.h000066400000000000000000000024131314037446600260700ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/arch-x86_64.h000066400000000000000000000024131314037446600260750ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/callback.h000066400000000000000000000076671314037446600260000ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/dom0_ops.h000066400000000000000000000077061314037446600257560ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. 
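/*
 * Illustrative sketch (not part of the original header): registering the
 * event and failsafe callbacks defined above.  It assumes an x86-64 guest
 * where xen_callback_t is a plain virtual address, a guest-kernel hypercall
 * wrapper HYPERVISOR_callback_op(int cmd, void *arg) matching the prototype
 * documented in callback.h, and assembler entry stubs supplied elsewhere;
 * all of these are assumptions of the sketch.
 */
extern void hypervisor_event_entry(void);     /* hypothetical asm entry stub */
extern void hypervisor_failsafe_entry(void);  /* hypothetical asm entry stub */
extern long HYPERVISOR_callback_op(int cmd, void *arg);   /* assumed wrapper */

static long register_xen_callbacks(void)
{
    struct callback_register event = {
        .type    = CALLBACKTYPE_event,
        .flags   = 0,                  /* event delivery is masked regardless */
        .address = (unsigned long)hypervisor_event_entry,
    };
    struct callback_register failsafe = {
        .type    = CALLBACKTYPE_failsafe,
        .flags   = CALLBACKF_mask_events,
        .address = (unsigned long)hypervisor_failsafe_entry,
    };
    long rc = HYPERVISOR_callback_op(CALLBACKOP_register, &event);

    if (rc == 0)
        rc = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
    return rc;
}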
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/domctl.h000066400000000000000000000614111314037446600255110ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000005 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ #define XEN_DOMCTL_createdomain 1 struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) /* Use hardware-assisted paging if available? */ #define _XEN_DOMCTL_CDF_hap 1 #define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) /* Should domain memory integrity be verifed by tboot during Sx? 
*/ #define _XEN_DOMCTL_CDF_s3_integrity 2 #define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) uint32_t flags; }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_getdomaininfo 5 struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* CPU to which this domain is bound. */ #define XEN_DOMINF_cpumask 255 #define XEN_DOMINF_cpushift 8 /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); #define XEN_DOMCTL_getmemlist 6 struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); #define XEN_DOMCTL_getpageframeinfo2 8 struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. 
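/*
 * Illustrative sketch (not part of the original header): querying a domain's
 * state with XEN_DOMCTL_getdomaininfo.  do_domctl() stands in for whatever
 * privileged wrapper the control stack uses to issue the domctl hypercall
 * (libxc provides one); it is an assumption of this sketch.
 */
extern int do_domctl(struct xen_domctl *domctl);     /* hypothetical wrapper */

static int domain_is_running(domid_t domid)
{
    struct xen_domctl domctl = {
        .cmd               = XEN_DOMCTL_getdomaininfo,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain            = domid,
    };

    if (do_domctl(&domctl) != 0)
        return -1;                               /* hypercall failed */

    /* domctl.domain is echoed back; flags carry the XEN_DOMINF_* bits. */
    return !!(domctl.u.getdomaininfo.flags & XEN_DOMINF_running);
}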
*/ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ #define XEN_DOMCTL_shadow_op 10 /* Disable shadow mode. */ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); #define XEN_DOMCTL_max_mem 11 struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); #define XEN_DOMCTL_getvcpuinfo 14 struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? 
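/*
 * Illustrative sketch (not part of the original header): one round of
 * log-dirty harvesting with XEN_DOMCTL_shadow_op / XEN_DOMCTL_SHADOW_OP_CLEAN,
 * as a live-migration loop would perform.  do_domctl() and
 * set_xen_guest_handle() are assumed to come from the caller's environment
 * (as in libxc); both are assumptions of this sketch.
 */
extern int do_domctl(struct xen_domctl *domctl);     /* hypothetical wrapper */

static long harvest_dirty_bitmap(domid_t domid, uint8_t *bitmap,
                                 uint64_t bitmap_pages)
{
    struct xen_domctl domctl = {
        .cmd               = XEN_DOMCTL_shadow_op,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain            = domid,
        .u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN,
        .u.shadow_op.pages = bitmap_pages,   /* guest pages covered by 'bitmap' */
    };
    set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, bitmap);

    if (do_domctl(&domctl) != 0)
        return -1;

    /* 'pages' is updated with the actual size; stats report the dirty count. */
    return (long)domctl.u.shadow_op.stats.dirty_count;
}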
*/ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. */ #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_getvcpuaffinity 25 struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); #define XEN_DOMCTL_max_vcpus 15 struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); #define XEN_DOMCTL_scheduler_op 16 /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); #define XEN_DOMCTL_setdomainhandle 17 struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); #define XEN_DOMCTL_setdebugging 18 struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); #define XEN_DOMCTL_irq_permission 19 struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); #define XEN_DOMCTL_iomem_permission 20 struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); #define XEN_DOMCTL_ioport_permission 21 struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? 
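/*
 * Illustrative sketch (not part of the original header): adjusting a domain's
 * credit-scheduler parameters through XEN_DOMCTL_scheduler_op.  do_domctl()
 * is again a stand-in for the control stack's domctl wrapper and is an
 * assumption of this sketch.
 */
extern int do_domctl(struct xen_domctl *domctl);     /* hypothetical wrapper */

static int set_credit_weight(domid_t domid, uint16_t weight)
{
    struct xen_domctl domctl = {
        .cmd               = XEN_DOMCTL_scheduler_op,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain            = domid,
        .u.scheduler_op.sched_id        = XEN_SCHEDULER_CREDIT,
        .u.scheduler_op.cmd             = XEN_DOMCTL_SCHEDOP_putinfo,
        .u.scheduler_op.u.credit.weight = weight,
        .u.scheduler_op.u.credit.cap    = 0,          /* 0 == no cap */
    };

    return do_domctl(&domctl);
}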
*/ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); #define XEN_DOMCTL_hypercall_init 22 struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); #define XEN_DOMCTL_arch_setup 23 #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) #define _XEN_DOMAINSETUP_sioemu_guest 2 #define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); #define XEN_DOMCTL_settimeoffset 24 struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); #define XEN_DOMCTL_real_mode_area 26 struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. */ #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_test_assign_device 45 #define XEN_DOMCTL_deassign_device 47 struct xen_domctl_assign_device { uint32_t machine_bdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Retrieve sibling devices infomation of machine_bdf */ #define XEN_DOMCTL_get_device_group 50 struct xen_domctl_get_device_group { uint32_t machine_bdf; /* IN */ uint32_t max_sdevs; /* IN */ uint32_t num_sdevs; /* OUT */ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ }; typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); /* Pass-through interrupts: bind real irq -> hvm devfn. 
*/ #define XEN_DOMCTL_bind_pt_irq 38 #define XEN_DOMCTL_unbind_pt_irq 48 typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA, PT_IRQ_TYPE_MSI, PT_IRQ_TYPE_MSI_TRANSLATE, } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; struct { uint8_t gvec; uint32_t gflags; uint64_aligned_t gtable; } msi; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. */ #define XEN_DOMCTL_memory_mapping 39 #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. */ #define XEN_DOMCTL_ioport_mapping 40 struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ #define XEN_DOMCTL_pin_mem_cacheattr 41 /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ #define XEN_DOMCTL_set_opt_feature 44 struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! 
*/ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); /* * Set the target domain for a domain */ #define XEN_DOMCTL_set_target 46 struct xen_domctl_set_target { domid_t target; }; typedef struct xen_domctl_set_target xen_domctl_set_target_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); #if defined(__i386__) || defined(__x86_64__) # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF # define XEN_DOMCTL_set_cpuid 49 struct xen_domctl_cpuid { unsigned int input[2]; unsigned int eax; unsigned int ebx; unsigned int ecx; unsigned int edx; }; typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); #endif #define XEN_DOMCTL_subscribe 29 struct xen_domctl_subscribe { uint32_t port; /* IN */ }; typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); /* * Define the maximum machine address size which should be allocated * to a guest. */ #define XEN_DOMCTL_set_machine_address_size 51 #define XEN_DOMCTL_get_machine_address_size 52 /* * Do not inject spurious page faults into this domain. */ #define XEN_DOMCTL_suppress_spurious_page_faults 53 #define XEN_DOMCTL_debug_op 54 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 struct xen_domctl_debug_op { uint32_t op; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); /* * Request a particular record from the HVM context */ #define XEN_DOMCTL_gethvmcontext_partial 55 typedef struct xen_domctl_hvmcontext_partial { uint32_t type; /* IN: Type of record required */ uint32_t instance; /* IN: Instance of that type */ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ } xen_domctl_hvmcontext_partial_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); struct xen_domctl { uint32_t cmd; uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_get_device_group get_device_group; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct 
xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; struct xen_domctl_set_target set_target; struct xen_domctl_subscribe subscribe; struct xen_domctl_debug_op debug_op; #if defined(__i386__) || defined(__x86_64__) struct xen_domctl_cpuid cpuid; #endif uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/elfnote.h000066400000000000000000000165031314037446600256650ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). 
* * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The (non-default) location the initial phys-to-machine map should be * placed at by the hypervisor (Dom0) or the tools (DomU). * The kernel must be prepared for this mapping to be established using * large pages, despite such otherwise not being available to guests. * The kernel must also be able to handle the page table pages used for * this mapping not being accessible through the initial mapping. * (Only x86-64 supports this at present.) */ #define XEN_ELFNOTE_INIT_P2M 15 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_INIT_P2M /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. 
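/*
 * Illustrative sketch (not part of the original header): emitting a numeric
 * Xen ELF note from C by hand-building a note record in a ".note.Xen"
 * section.  Guest kernels normally do this from assembler with an ELFNOTE
 * macro; the record layout below follows the generic ELF note format
 * (4-byte aligned name/descriptor), and the section name, the 8-byte
 * descriptor width and the example address are assumptions of this sketch.
 */
#include <stdint.h>

struct xen_elfnote_u64 {
    uint32_t namesz;        /* strlen("Xen") + 1 */
    uint32_t descsz;        /* sizeof(uint64_t)  */
    uint32_t type;          /* one of the XEN_ELFNOTE_* numbers above */
    char     name[4];       /* "Xen", NUL-padded to a 4-byte boundary */
    uint64_t desc;
} __attribute__((packed, aligned(4)));

/* Tell the image builder where the hypercall transfer page should be mapped. */
static const struct xen_elfnote_u64 xen_note_hypercall_page
    __attribute__((used, section(".note.Xen"))) = {
    .namesz = 4,
    .descsz = sizeof(uint64_t),
    .type   = XEN_ELFNOTE_HYPERCALL_PAGE,
    .name   = "Xen",
    .desc   = 0xffffffff80001000ULL,     /* hypothetical virtual address */
};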
* xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/event_channel.h000066400000000000000000000213251314037446600270400ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. 
*/ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. 
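/*
 * Illustrative sketch (not part of the original header): the usual
 * split-driver handshake -- allocate an unbound port that a peer domain may
 * bind to, then notify it.  HYPERVISOR_event_channel_op(int cmd, void *args)
 * mirrors the hypercall prototype documented at the top of this file, but the
 * wrapper itself is assumed to exist in the guest kernel.
 */
extern int HYPERVISOR_event_channel_op(int cmd, void *args);     /* assumed */

static int alloc_and_kick(domid_t remote_domid, evtchn_port_t *port_out)
{
    struct evtchn_alloc_unbound alloc = {
        .dom        = DOMID_SELF,
        .remote_dom = remote_domid,
    };
    struct evtchn_send send;
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);

    if (rc)
        return rc;

    *port_out = alloc.port;   /* typically advertised to the peer via xenstore */

    send.port = alloc.port;
    return HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}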
*/ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/features.h000066400000000000000000000055751314037446600260560ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/grant_table.h000066400000000000000000000406711314037446600265160ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
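/*
 * Illustrative sketch (not part of the original header): testing one of the
 * XENFEAT_* bits at start of day.  struct xen_feature_info and the
 * XENVER_get_features command come from the companion version.h (not shown
 * here), and HYPERVISOR_xen_version() is the usual guest hypercall wrapper;
 * all three are assumptions of this sketch.
 */
extern int HYPERVISOR_xen_version(int cmd, void *arg);   /* assumed wrapper */

static int xen_feature_present(unsigned int feature)
{
    struct xen_feature_info fi;

    fi.submap_idx = feature / 32;    /* feature bits are packed 32 per submap */
    if (fi.submap_idx >= XENFEAT_NR_SUBMAPS)
        return 0;
    if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
        return 0;
    return !!(fi.submap & (1u << (feature % 32)));
}

/* e.g. xen_feature_present(XENFEAT_auto_translated_physmap) */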
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry grant_entry_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. 
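/*
 * Illustrative sketch (not part of the original header): following the update
 * rules spelled out above to introduce and later revoke a GTF_permit_access
 * entry.  'gnttab' is the guest's mapped grant_entry_t array, wmb() is the
 * guest's write barrier, and the GCC __sync builtin stands in for an SMP-safe
 * CMPXCHG; GTF_readonly/GTF_reading/GTF_writing are defined just below.  The
 * barrier and the calling context are assumptions about the environment.
 */
static void grant_frame(grant_entry_t *gnttab, grant_ref_t ref,
                        domid_t peer, uint32_t frame, int readonly)
{
    gnttab[ref].domid = peer;
    gnttab[ref].frame = frame;
    wmb();                      /* domid/frame must be visible before flags */
    gnttab[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
}

static int end_grant(grant_entry_t *gnttab, grant_ref_t ref)
{
    uint16_t flags = gnttab[ref].flags;

    if (flags & (GTF_reading | GTF_writing))
        return 0;               /* still mapped by the peer; try again later */
    /* Reuse is control-dependent on the CMPXCHG succeeding (step 3 above). */
    return __sync_bool_compare_and_swap(&gnttab[ref].flags, flags, 0);
}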
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. 
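/*
 * Illustrative sketch (not part of the original header): mapping a peer's
 * grant at a host virtual address with GNTTABOP_map_grant_ref.
 * HYPERVISOR_grant_table_op(cmd, uop, count) is the customary guest-kernel
 * wrapper, 'vaddr' must be a page the guest has set aside, and
 * GNTMAP_host_map / the GNTST_* codes are defined further down in this
 * header; the wrapper and calling context are assumptions of this sketch.
 */
extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                     unsigned int count);        /* assumed */

static int map_foreign_grant(domid_t peer, grant_ref_t ref,
                             unsigned long vaddr, grant_handle_t *handle)
{
    struct gnttab_map_grant_ref op = {
        .host_addr = vaddr,
        .flags     = GNTMAP_host_map,
        .ref       = ref,
        .dom       = peer,
    };

    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) != 0)
        return GNTST_general_error;
    if (op.status != GNTST_okay)
        return op.status;        /* negative GNTST_* code from Xen */

    *handle = op.handle;         /* keep for GNTTABOP_unmap_grant_ref later */
    return GNTST_okay;
}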
*/ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. 
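/*
 * Illustrative sketch (not part of the original header): asking Xen to copy
 * out of a peer's granted page into a frame we own with GNTTABOP_copy.
 * HYPERVISOR_grant_table_op() is assumed as before, 'local_gmfn' must be a
 * frame owned by the caller, and the GNTST_* codes appear further down in
 * this header.
 */
extern int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop,
                                     unsigned int count);        /* assumed */

static int copy_in_from_grant(domid_t peer, grant_ref_t src_ref,
                              xen_pfn_t local_gmfn, uint16_t len)
{
    gnttab_copy_t op = {
        .source.u.ref  = src_ref,
        .source.domid  = peer,
        .source.offset = 0,
        .dest.u.gmfn   = local_gmfn,
        .dest.domid    = DOMID_SELF,
        .dest.offset   = 0,
        .len           = len,          /* offset + len must stay in one page */
        .flags         = GNTCOPY_source_gref,
    };

    if (HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1) != 0)
        return GNTST_general_error;
    return op.status;
}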
*/ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. 
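*/
/*
 * For illustration only (the names below are hypothetical): the
 * GNTTABOP_error_msgs table that follows expands to a brace-enclosed list
 * of strings indexed by -status, so an error can be reported like this:
 *
 *     static const char *const gnttab_err[] = GNTTABOP_error_msgs;
 *     if (st < 0 &&
 *         -st < (int)(sizeof(gnttab_err) / sizeof(gnttab_err[0])))
 *         printk("grant table op failed: %s\n", gnttab_err[-st]);
 */
/* Human-readable counterparts of the GNTST_* codes above: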
*/ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/000077500000000000000000000000001314037446600246455ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/e820.h000066400000000000000000000030061314037446600254730ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/hvm_info_table.h000066400000000000000000000047531314037446600300030ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; /* Should firmware build ACPI tables? */ uint8_t acpi_enabled; /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ uint8_t apic_mode; /* How many CPUs does this domain have? */ uint32_t nr_vcpus; /* * MEMORY MAP provided by HVM domain builder. * Notes: * 1. page_to_phys(x) = x << 12 * 2. If a field is zero, the corresponding range does not exist. */ /* * 0x0 to page_to_phys(low_mem_pgend)-1: * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) */ uint32_t low_mem_pgend; /* * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: * Reserved for special memory mappings */ uint32_t reserved_mem_pgstart; /* * 0x100000000 to page_to_phys(high_mem_pgend)-1: * RAM above 4GB */ uint32_t high_mem_pgend; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/hvm_op.h000066400000000000000000000112521314037446600263070ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. 
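 *
 * For illustration only (not part of the ABI; it assumes the usual
 * HYPERVISOR_hvm_op() wrapper and a hypothetical <guest_domid>): this is
 * typically issued by the device model or toolstack on the guest's behalf,
 * e.g. to assert ISA IRQ 10:
 *
 *     struct xen_hvm_set_isa_irq_level op = {
 *         .domid   = guest_domid,
 *         .isa_irq = 10,
 *         .level   = 1,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_set_isa_irq_level, &op);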
*/ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/ioreq.h000066400000000000000000000105071314037446600261400ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t pad:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. */ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. 
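 * (With IOREQ_BUFFER_SLOT_NUM == 511, that is 511 * 8 bytes of buf_ioreq
 * slots plus two 4-byte ring pointers, i.e. exactly 4096 bytes: one 4 KiB
 * page.)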
*/ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/params.h000066400000000000000000000076341314037446600263130ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. 
* no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/hvm/save.h000066400000000000000000000063451314037446600257640ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." 
#endif /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/000077500000000000000000000000001314037446600244625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/blkif.h000066400000000000000000000310611314037446600257230ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). 
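 *
 * For illustration only (not part of the ABI): a frontend owning a
 * blkif_front_ring_t called <ring>, initialised with FRONT_RING_INIT(),
 * would typically drain responses using the final-check pattern roughly as
 * below; a read barrier is required after fetching rsp_prod and before
 * reading the responses themselves:
 *
 *     RING_IDX i, rp;
 *     int more_to_do;
 *     do {
 *         rp = ring.sring->rsp_prod;    (read barrier here)
 *         for (i = ring.rsp_cons; i != rp; i++) {
 *             blkif_response_t *rsp = RING_GET_RESPONSE(&ring, i);
 *             (complete the request matching rsp->id using rsp->status)
 *         }
 *         ring.rsp_cons = i;
 *         RING_FINAL_CHECK_FOR_RESPONSES(&ring, more_to_do);
 *     } while (more_to_do);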
*/ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. 
Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 #ifdef CUSTOMIZED struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #else struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). 
*/ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); #endif typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/console.h000066400000000000000000000033341314037446600263000ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. 
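 *
 * For illustration only (not part of this header): with <intf> pointing at
 * the shared struct xencons_interface defined below, a guest writes output
 * roughly as follows, then signals its console event channel:
 *
 *     XENCONS_RING_IDX cons = intf->out_cons, prod = intf->out_prod;
 *     (memory barrier)
 *     while (len && (prod - cons) < sizeof(intf->out))
 *         intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = *data++, len--;
 *     (write barrier)
 *     intf->out_prod = prod;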
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/fbif.h000066400000000000000000000127051314037446600255460ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. 
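 *
 * For illustration only (not an ABI requirement): assuming a mapped,
 * shared struct xenfb_page called <page> and a ring that is not full, a
 * frontend could queue one update and then notify its event channel:
 *
 *     union xenfb_out_event *ev =
 *         &XENFB_OUT_RING_REF(page, page->out_prod);
 *     ev->update.type   = XENFB_TYPE_UPDATE;
 *     ev->update.x      = x;
 *     ev->update.y      = y;
 *     ev->update.width  = width;
 *     ev->update.height = height;
 *     (write barrier) page->out_prod++;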
*/ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* offset of the framebuffer in bytes */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* * Framebuffer refresh period advice * Backend sends it to advise the frontend their preferred period of * refresh. Frontends that keep the framebuffer constantly up-to-date * just ignore it. Frontends that use the advice should immediately * refresh the framebuffer (and send an update notification event if * those have been requested), then use the update frequency to guide * their periodical refreshs. */ #define XENFB_TYPE_REFRESH_PERIOD 1 #define XENFB_NO_REFRESH 0 struct xenfb_refresh_period { uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */ uint32_t period; /* period of refresh, in ms, * XENFB_NO_REFRESH if no refresh is needed */ }; #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; struct xenfb_refresh_period refresh_period; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs * 64 bit. 256 directories give enough room for a 512 Meg * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. */ unsigned long pd[256]; }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. 
*/ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/fsif.h000066400000000000000000000123741314037446600255710ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, . */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_write_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_stat_request { uint32_t fd; }; /* This structure is a copy of some fields from stat structure, returned * via the ring. 
*/ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t stat_ret; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; union { uint64_t ret_val; struct fsif_stat_response fstat; }; }; typedef struct fsif_response fsif_response_t; #define FSIF_RING_ENTRY_SIZE 64 #define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ sizeof(grant_ref_t) + 1) #define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ sizeof(grant_ref_t) + 1) DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #define STATE_CLOSING "closing" #define STATE_CLOSED "closed" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/kbdif.h000066400000000000000000000076241314037446600257230ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/netif.h000066400000000000000000000163401314037446600257440ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. 
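 *
 * For illustration only (<tx_ring>, <prod> and <mss> are hypothetical): a
 * frontend transmitting a TSO packet sets NETTXF_extra_info on the first
 * netif_tx_request and fills the following ring slot as a GSO fragment,
 * roughly:
 *
 *     struct netif_extra_info *gso = (struct netif_extra_info *)
 *         RING_GET_REQUEST(&tx_ring, prod + 1);
 *     gso->type           = XEN_NETIF_EXTRA_TYPE_GSO;
 *     gso->flags          = 0;
 *     gso->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
 *     gso->u.gso.size     = mss;
 *     gso->u.gso.pad      = 0;
 *     gso->u.gso.features = 0;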
*/ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
*/ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/pciif.h000066400000000000000000000074321314037446600257330ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) #define _XEN_PCIB_AERHANDLER (1) #define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) #define _XEN_PCIB_active (2) #define XEN_PCIB_active (1<<_XEN_PCIB_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) #define XEN_PCI_OP_enable_msi (2) #define XEN_PCI_OP_disable_msi (3) #define XEN_PCI_OP_enable_msix (4) #define XEN_PCI_OP_disable_msix (5) #define XEN_PCI_OP_aer_detected (6) #define XEN_PCI_OP_aer_resume (7) #define XEN_PCI_OP_aer_mmio (8) #define XEN_PCI_OP_aer_slotreset (9) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) /* * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) * Should not exceed 128 */ #define SH_INFO_MAX_VEC 128 struct xen_msix_entry { uint16_t vector; uint16_t entry; }; struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; /* IN: Contains extra infor for this operation */ uint32_t info; /*IN: param for msi-x */ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; }; /*used for pcie aer handling*/ struct xen_pcie_aer_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /*IN/OUT: return aer_op result or carry error_detected state as input*/ int32_t err; /* IN: which device 
to touch */ uint32_t domain; /* PCI Domain/Segment*/ uint32_t bus; uint32_t devfn; }; struct xen_pci_sharedinfo { /* flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; struct xen_pcie_aer_op aer_op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/protocols.h000066400000000000000000000032101314037446600266530ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/ring.h000066400000000000000000000352421314037446600256000ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. 
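 *
 * For orientation, a hedged sketch of the back-end consumption pattern the
 * macros below are meant to support (assumptions: "ring" is an attached
 * mytag_back_ring_t and handle_request() is a hypothetical helper that
 * fills in the response; a real backend also issues the appropriate
 * xen_rmb() before dereferencing a request):
 *
 *   int notify, work;
 *   do {
 *       while (RING_HAS_UNCONSUMED_REQUESTS(&ring)) {
 *           request_t  *req = RING_GET_REQUEST(&ring, ring.req_cons);
 *           response_t *rsp = RING_GET_RESPONSE(&ring, ring.rsp_prod_pvt);
 *           handle_request(req, rsp);
 *           ring.req_cons++;
 *           ring.rsp_prod_pvt++;
 *       }
 *       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&ring, notify);
 *       RING_FINAL_CHECK_FOR_REQUESTS(&ring, work);
 *   } while (work);
 *
 * If "notify" is set after the push, the front end must be notified via the
 * event channel bound to this ring.
 *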
*/ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include "../xen-compat.h" #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t pad[48]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. 
* This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. 
True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. */ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/tpmif.h000066400000000000000000000046251314037446600257610ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/usbif.h000066400000000000000000000071541314037446600257520ustar00rootroot00000000000000/* * usbif.h * * USB I/O interface for Xen guest OSes. * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ #define __XEN_PUBLIC_IO_USBIF_H__ #include "ring.h" #include "../grant_table.h" /* * USB pipe in usbif_request * * bits 0-5 are specific bits for virtual USB driver. * bits 7-31 are standard urb pipe. 
* * - port number(NEW): bits 0-4 * (USB_MAXCHILDREN is 31) * * - operation flag(NEW): bit 5 * (0 = submit urb, * 1 = unlink urb) * * - direction: bit 7 * (0 = Host-to-Device [Out] * 1 = Device-to-Host [In]) * * - device address: bits 8-14 * * - endpoint: bits 15-18 * * - pipe type: bits 30-31 * (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) */ #define usbif_pipeportnum(pipe) ((pipe) & 0x1f) #define usbif_setportnum_pipe(pipe,portnum) \ ((pipe)|(portnum)) #define usbif_pipeunlink(pipe) ((pipe) & 0x20) #define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) #define USBIF_BACK_MAX_PENDING_REQS (128) #define USBIF_MAX_SEGMENTS_PER_REQUEST (10) struct usbif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; struct usbif_request { uint16_t id; /* request id */ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ /* basic urb parameter */ uint32_t pipe; uint16_t transfer_flags; uint16_t buffer_length; union { uint8_t ctrl[8]; /* setup_packet (Ctrl) */ struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t start_frame; /* start frame */ uint16_t number_of_packets; /* number of ISO packet */ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ } isoc; struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t pad[3]; } intr; struct { uint16_t unlink_id; /* unlink request id */ uint16_t pad[3]; } unlink; } u; /* urb data segments */ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_request usbif_request_t; struct usbif_response { uint16_t id; /* request id */ uint16_t start_frame; /* start frame (ISO) */ int32_t status; /* status (non-ISO) */ int32_t actual_length; /* actual transfer length */ int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_response usbif_response_t; DEFINE_RING_TYPES(usbif, struct usbif_request, struct usbif_response); #define USB_RING_SIZE __RING_SIZE((struct usbif_sring *)0, PAGE_SIZE) #endif /* __XEN_PUBLIC_IO_USBIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/vscsiif.h000066400000000000000000000070421314037446600263040ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. 
*/ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include "ring.h" #include "../grant_table.h" /* command between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_BACK_MAX_PENDING_REQS 128 /* * Maximum scatter/gather segments per request. * * Considering balance between allocating al least 16 "vscsiif_request" * structures on one page (4096bytes) and number of scatter gather * needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* * base on linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; } seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; }; typedef struct vscsiif_request vscsiif_request_t; struct vscsiif_response { uint16_t rqid; uint8_t padding; uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/xenbus.h000066400000000000000000000046401314037446600261430ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. 
States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/io/xs_wire.h000066400000000000000000000067671314037446600263330ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). 
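 *
 * Illustrative sketch of one request as laid out on the wire (the path and
 * req_id values are hypothetical): an XS_READ of "device/vbd" is this
 * 16-byte header followed immediately by the nul-terminated path, copied
 * into the "req" ring of struct xenstore_domain_interface defined below.
 *
 *   struct xsd_sockmsg hdr = {
 *       .type   = XS_READ,
 *       .req_id = 1,                      (echoed back in the reply)
 *       .tx_id  = 0,                      (not part of a transaction)
 *       .len    = sizeof("device/vbd"),   (payload bytes, incl. nul)
 *   };
 *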
*/ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/kexec.h000066400000000000000000000144501314037446600253270ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. * * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. 
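 *
 * A hedged sketch of operation (3) as issued from dom0 (HYPERVISOR_kexec_op
 * is the kernel-side hypercall wrapper and is an assumption here; the types
 * and command numbers it uses are defined below):
 *
 *   xen_kexec_exec_t exec = { .type = KEXEC_TYPE_CRASH };
 *   rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &exec);
 *   (on success the call does not return; otherwise rc holds an error)
 *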
*/ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ #define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap * Note that although this is adjacent * to Xen it exists in a separate EFI * region on ia64, and thus needs to be * inserted into iomem_machine separately */ #define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of * the ia64_boot_param */ #define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of * of the EFI Memory Map */ #define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... 
[in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/memory.h000066400000000000000000000232611314037446600255400ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. */ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. 
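 *
 * For example, a reservation that must stay addressable with 32 bits and
 * be served from NUMA node 0 would set (illustrative only):
 *
 *     mem_flags = XENMEMF_address_bits(32) | XENMEMF_node(0);
 *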
*/ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. 
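 *
 * Hedged usage sketch (HYPERVISOR_memory_op is the guest-side hypercall
 * wrapper and is an assumption here, not defined in this header):
 *
 *   struct xen_machphys_mapping m2p;
 *   rc = HYPERVISOR_memory_op(XENMEM_machphys_mapping, &m2p);
 *   (on success m2p.v_start .. m2p.v_end bound the mapping and
 *    m2p.max_mfn is the largest MFN it can translate)
 *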
Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/nmi.h000066400000000000000000000052641314037446600250160ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. 
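 *
 * Hedged dom0 sketch of the registration interface defined below
 * (HYPERVISOR_nmi_op is the kernel-side hypercall wrapper and nmi_entry is
 * a hypothetical entry point; neither is part of this header):
 *
 *   struct xennmi_callback cb = {
 *       .handler_address = (unsigned long)nmi_entry,
 *       .pad             = 0,
 *   };
 *   rc = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
 *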
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/physdev.h000066400000000000000000000170731314037446600257160ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. 
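 *
 * Hedged sketch (HYPERVISOR_physdev_op is the guest-side hypercall wrapper
 * and is an assumption here; IRQ 9 is an arbitrary example value):
 *
 *   struct physdev_irq irq_op = { .irq = 9 };
 *   rc = HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op);
 *   (on success irq_op.vector holds the vector Xen allocated)
 *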
*/ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. */ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/platform.h000066400000000000000000000301251314037446600260510ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. 
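 * (size in bytes of the buffer referenced by the data handle above)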
*/ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. */ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. 
*/ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ }; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct 
xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/000077500000000000000000000000001314037446600253315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/COPYING000066400000000000000000000032641314037446600263710ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64.h000066400000000000000000000511341314037446600271640ustar00rootroot00000000000000/****************************************************************************** * arch-ia64/hypervisor-if.h * * Guest OS interface to IA64 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include "xen.h" #ifndef __HYPERVISOR_IF_IA64_H__ #define __HYPERVISOR_IF_IA64_H__ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." #endif /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #define uint64_aligned_t uint64_t #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* Arch specific VIRQs definition */ #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ /* Maximum number of virtual CPUs in multi-processor guests. */ /* WARNING: before changing this, check that shared_info fits on a page */ #define MAX_VIRT_CPUS 64 /* IO ports location for PV. 
*/ #define IO_PORTS_PADDR 0x00000ffffc000000UL #define IO_PORTS_SIZE 0x0000000004000000UL #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; #ifdef __XEN_TOOLS__ #define XEN_PAGE_SIZE XC_PAGE_SIZE #else #define XEN_PAGE_SIZE PAGE_SIZE #endif #define INVALID_MFN (~0UL) struct pt_fpreg { union { unsigned long bits[2]; long double __dummy; /* force 16-byte alignment */ } u; }; union vac { unsigned long value; struct { int a_int:1; int a_from_int_cr:1; int a_to_int_cr:1; int a_from_psr:1; int a_from_cpuid:1; int a_cover:1; int a_bsw:1; long reserved:57; }; }; typedef union vac vac_t; union vdc { unsigned long value; struct { int d_vmsw:1; int d_extint:1; int d_ibr_dbr:1; int d_pmc:1; int d_to_pmd:1; int d_itm:1; long reserved:58; }; }; typedef union vdc vdc_t; struct mapped_regs { union vac vac; union vdc vdc; unsigned long virt_env_vaddr; unsigned long reserved1[29]; unsigned long vhpi; unsigned long reserved2[95]; union { unsigned long vgr[16]; unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active }; union { unsigned long vbgr[16]; unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active }; unsigned long vnat; unsigned long vbnat; unsigned long vcpuid[5]; unsigned long reserved3[11]; unsigned long vpsr; unsigned long vpr; unsigned long reserved4[76]; union { unsigned long vcr[128]; struct { unsigned long dcr; // CR0 unsigned long itm; unsigned long iva; unsigned long rsv1[5]; unsigned long pta; // CR8 unsigned long rsv2[7]; unsigned long ipsr; // CR16 unsigned long isr; unsigned long rsv3; unsigned long iip; unsigned long ifa; unsigned long itir; unsigned long iipa; unsigned long ifs; unsigned long iim; // CR24 unsigned long iha; unsigned long rsv4[38]; unsigned long lid; // CR64 unsigned long ivr; unsigned long tpr; unsigned long eoi; unsigned long irr[4]; unsigned long itv; // CR72 unsigned long pmv; unsigned long cmcv; unsigned long rsv5[5]; unsigned long lrr0; // CR80 unsigned long lrr1; unsigned long rsv6[46]; }; }; union { unsigned long reserved5[128]; struct { unsigned long precover_ifs; unsigned long unat; // not sure if this is needed until NaT arch is done int interrupt_collection_enabled; // virtual psr.ic /* virtual interrupt deliverable flag is evtchn_upcall_mask in * shared info area now. interrupt_mask_addr is the address * of evtchn_upcall_mask for current vcpu */ unsigned char *interrupt_mask_addr; int pending_interruption; unsigned char vpsr_pp; unsigned char vpsr_dfh; unsigned char hpsr_dfh; unsigned char hpsr_mfh; unsigned long reserved5_1[4]; int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual int banknum; // 0 or 1, which virtual register bank is active unsigned long rrs[8]; // region registers unsigned long krs[8]; // kernel registers unsigned long tmp[16]; // temp registers (e.g. for hyperprivops) /* itc paravirtualization * vAR.ITC = mAR.ITC + itc_offset * itc_last is one which was lastly passed to * the guest OS in order to prevent it from * going backwords. */ unsigned long itc_offset; unsigned long itc_last; }; }; }; typedef struct mapped_regs mapped_regs_t; struct vpd { struct mapped_regs vpd_low; unsigned long reserved6[3456]; unsigned long vmm_avail[128]; unsigned long reserved7[4096]; }; typedef struct vpd vpd_t; struct arch_vcpu_info { }; typedef struct arch_vcpu_info arch_vcpu_info_t; /* * This structure is used for magic page in domain pseudo physical address * space and the result of XENMEM_machine_memory_map. 
* As the XENMEM_machine_memory_map result, * xen_memory_map::nr_entries indicates the size in bytes * including struct xen_ia64_memmap_info. Not the number of entries. */ struct xen_ia64_memmap_info { uint64_t efi_memmap_size; /* size of EFI memory map */ uint64_t efi_memdesc_size; /* size of an EFI memory map descriptor */ uint32_t efi_memdesc_version; /* memory descriptor version */ void *memdesc[0]; /* array of efi_memory_desc_t */ }; typedef struct xen_ia64_memmap_info xen_ia64_memmap_info_t; struct arch_shared_info { /* PFN of the start_info page. */ unsigned long start_info_pfn; /* Interrupt vector for event channel. */ int evtchn_vector; /* PFN of memmap_info page */ unsigned int memmap_info_num_pages;/* currently only = 1 case is supported. */ unsigned long memmap_info_pfn; uint64_t pad[31]; }; typedef struct arch_shared_info arch_shared_info_t; typedef unsigned long xen_callback_t; struct ia64_tr_entry { unsigned long pte; unsigned long itir; unsigned long vadr; unsigned long rid; }; typedef struct ia64_tr_entry ia64_tr_entry_t; DEFINE_XEN_GUEST_HANDLE(ia64_tr_entry_t); struct vcpu_tr_regs { struct ia64_tr_entry itrs[12]; struct ia64_tr_entry dtrs[12]; }; union vcpu_ar_regs { unsigned long ar[128]; struct { unsigned long kr[8]; unsigned long rsv1[8]; unsigned long rsc; unsigned long bsp; unsigned long bspstore; unsigned long rnat; unsigned long rsv2; unsigned long fcr; unsigned long rsv3[2]; unsigned long eflag; unsigned long csd; unsigned long ssd; unsigned long cflg; unsigned long fsr; unsigned long fir; unsigned long fdr; unsigned long rsv4; unsigned long ccv; /* 32 */ unsigned long rsv5[3]; unsigned long unat; unsigned long rsv6[3]; unsigned long fpsr; unsigned long rsv7[3]; unsigned long itc; unsigned long rsv8[3]; unsigned long ign1[16]; unsigned long pfs; /* 64 */ unsigned long lc; unsigned long ec; unsigned long rsv9[45]; unsigned long ign2[16]; }; }; union vcpu_cr_regs { unsigned long cr[128]; struct { unsigned long dcr; // CR0 unsigned long itm; unsigned long iva; unsigned long rsv1[5]; unsigned long pta; // CR8 unsigned long rsv2[7]; unsigned long ipsr; // CR16 unsigned long isr; unsigned long rsv3; unsigned long iip; unsigned long ifa; unsigned long itir; unsigned long iipa; unsigned long ifs; unsigned long iim; // CR24 unsigned long iha; unsigned long rsv4[38]; unsigned long lid; // CR64 unsigned long ivr; unsigned long tpr; unsigned long eoi; unsigned long irr[4]; unsigned long itv; // CR72 unsigned long pmv; unsigned long cmcv; unsigned long rsv5[5]; unsigned long lrr0; // CR80 unsigned long lrr1; unsigned long rsv6[46]; }; }; struct vcpu_guest_context_regs { unsigned long r[32]; unsigned long b[8]; unsigned long bank[16]; unsigned long ip; unsigned long psr; unsigned long cfm; unsigned long pr; unsigned int nats; /* NaT bits for r1-r31. */ unsigned int bnats; /* Nat bits for banked registers. */ union vcpu_ar_regs ar; union vcpu_cr_regs cr; struct pt_fpreg f[128]; unsigned long dbr[8]; unsigned long ibr[8]; unsigned long rr[8]; unsigned long pkr[16]; /* FIXME: cpuid,pmd,pmc */ unsigned long xip; unsigned long xpsr; unsigned long xfs; unsigned long xr[4]; struct vcpu_tr_regs tr; /* Physical registers in case of debug event. */ unsigned long excp_iipa; unsigned long excp_ifa; unsigned long excp_isr; unsigned int excp_vector; /* * The rbs is intended to be the image of the stacked registers still * in the cpu (not yet stored in memory). It is laid out as if it * were written in memory at a 512 (64*8) aligned address + offset. * rbs_voff is (offset / 8). 
rbs_nat contains NaT bits for the * remaining rbs registers. rbs_rnat contains NaT bits for in memory * rbs registers. * Note: loadrs is 2**14 bytes == 2**11 slots. */ unsigned int rbs_voff; unsigned long rbs[2048]; unsigned long rbs_rnat; /* * RSE.N_STACKED_PHYS via PAL_RSE_INFO * Strictly this isn't cpu context, but this value is necessary * for domain save/restore. So is here. */ unsigned long num_phys_stacked; }; struct vcpu_guest_context { #define VGCF_EXTRA_REGS (1UL << 1) /* Set extra regs. */ #define VGCF_SET_CR_IRR (1UL << 2) /* Set cr_irr[0:3]. */ #define VGCF_online (1UL << 3) /* make this vcpu online */ #define VGCF_SET_AR_ITC (1UL << 4) /* set pv ar.itc. itc_offset, itc_last */ unsigned long flags; /* VGCF_* flags */ struct vcpu_guest_context_regs regs; unsigned long event_callback_ip; /* xen doesn't share privregs pages with hvm domain so that this member * doesn't make sense for hvm domain. * ~0UL is already used for INVALID_P2M_ENTRY. */ #define VGC_PRIVREGS_HVM (~(-2UL)) unsigned long privregs_pfn; }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); /* dom0 vp op */ #define __HYPERVISOR_ia64_dom0vp_op __HYPERVISOR_arch_0 /* Map io space in machine address to dom0 physical address space. Currently physical assigned address equals to machine address. */ #define IA64_DOM0VP_ioremap 0 /* Convert a pseudo physical page frame number to the corresponding machine page frame number. If no page is assigned, INVALID_MFN or GPFN_INV_MASK is returned depending on domain's non-vti/vti mode. */ #define IA64_DOM0VP_phystomach 1 /* Convert a machine page frame number to the corresponding pseudo physical page frame number of the caller domain. */ #define IA64_DOM0VP_machtophys 3 /* Reserved for future use. */ #define IA64_DOM0VP_iounmap 4 /* Unmap and free pages contained in the specified pseudo physical region. */ #define IA64_DOM0VP_zap_physmap 5 /* Assign machine page frame to dom0's pseudo physical address space. */ #define IA64_DOM0VP_add_physmap 6 /* expose the p2m table into domain */ #define IA64_DOM0VP_expose_p2m 7 /* xen perfmon */ #define IA64_DOM0VP_perfmon 8 /* gmfn version of IA64_DOM0VP_add_physmap */ #define IA64_DOM0VP_add_physmap_with_gmfn 9 /* get fpswa revision */ #define IA64_DOM0VP_fpswa_revision 10 /* Add an I/O port space range */ #define IA64_DOM0VP_add_io_space 11 /* expose the foreign domain's p2m table into privileged domain */ #define IA64_DOM0VP_expose_foreign_p2m 12 #define IA64_DOM0VP_EFP_ALLOC_PTE 0x1 /* allocate p2m table */ /* unexpose the foreign domain's p2m table into privileged domain */ #define IA64_DOM0VP_unexpose_foreign_p2m 13 /* get memmap_info and memmap. It is possible to map the page directly by foreign page mapping, but there is a race between writer. This hypercall avoids such race. */ #define IA64_DOM0VP_get_memmap 14 // flags for page assignement to pseudo physical address space #define _ASSIGN_readonly 0 #define ASSIGN_readonly (1UL << _ASSIGN_readonly) #define ASSIGN_writable (0UL << _ASSIGN_readonly) // dummy flag /* Internal only: memory attribute must be WC/UC/UCE. */ #define _ASSIGN_nocache 1 #define ASSIGN_nocache (1UL << _ASSIGN_nocache) // tlb tracking #define _ASSIGN_tlb_track 2 #define ASSIGN_tlb_track (1UL << _ASSIGN_tlb_track) /* Internal only: associated with PGC_allocated bit */ #define _ASSIGN_pgc_allocated 3 #define ASSIGN_pgc_allocated (1UL << _ASSIGN_pgc_allocated) /* Page is an IO page. 
*/ #define _ASSIGN_io 4 #define ASSIGN_io (1UL << _ASSIGN_io) /* This structure has the same layout of struct ia64_boot_param, defined in . It is redefined here to ease use. */ struct xen_ia64_boot_param { unsigned long command_line; /* physical address of cmd line args */ unsigned long efi_systab; /* physical address of EFI system table */ unsigned long efi_memmap; /* physical address of EFI memory map */ unsigned long efi_memmap_size; /* size of EFI memory map */ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ unsigned int efi_memdesc_version; /* memory descriptor version */ struct { unsigned short num_cols; /* number of columns on console. */ unsigned short num_rows; /* number of rows on console. */ unsigned short orig_x; /* cursor's x position */ unsigned short orig_y; /* cursor's y position */ } console_info; unsigned long fpswa; /* physical address of the fpswa interface */ unsigned long initrd_start; unsigned long initrd_size; unsigned long domain_start; /* va where the boot time domain begins */ unsigned long domain_size; /* how big is the boot domain */ }; #endif /* !__ASSEMBLY__ */ /* Size of the shared_info area (this is not related to page size). */ #define XSI_SHIFT 14 #define XSI_SIZE (1 << XSI_SHIFT) /* Log size of mapped_regs area (64 KB - only 4KB is used). */ #define XMAPPEDREGS_SHIFT 12 #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ #define XMAPPEDREGS_OFS XSI_SIZE /* Hyperprivops. */ #define HYPERPRIVOP_START 0x1 #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) #define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) #define HYPERPRIVOP_MAX (0x1a) /* Fast and light hypercalls. */ #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 /* Extra debug features. */ #define __HYPERVISOR_ia64_debug_op __HYPERVISOR_arch_2 /* Xencomm macros. */ #define XENCOMM_INLINE_MASK 0xf800000000000000UL #define XENCOMM_INLINE_FLAG 0x8000000000000000UL #ifndef __ASSEMBLY__ /* * Optimization features. * The hypervisor may do some special optimizations for guests. This hypercall * can be used to switch on/of these special optimizations. 
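 *
 * Illustrative only (the guest-side hypercall wrapper is not named here):
 * a guest asking for the region-7 identity mapping might fill in
 *
 *     struct xen_ia64_opt_feature optf = {
 *         .cmd    = XEN_IA64_OPTF_IDENT_MAP_REG7,
 *         .on     = XEN_IA64_OPTF_ON,
 *         .pgprot = <protection bits to OR into the pte>,
 *         .key    = <protection key for itir>,
 *     };
 *
 * and hand it to the __HYPERVISOR_opt_feature hypercall defined below.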
*/ #define __HYPERVISOR_opt_feature 0x700UL #define XEN_IA64_OPTF_OFF 0x0 #define XEN_IA64_OPTF_ON 0x1 /* * If this feature is switched on, the hypervisor inserts the * tlb entries without calling the guests traphandler. * This is useful in guests using region 7 for identity mapping * like the linux kernel does. */ #define XEN_IA64_OPTF_IDENT_MAP_REG7 1 /* Identity mapping of region 4 addresses in HVM. */ #define XEN_IA64_OPTF_IDENT_MAP_REG4 2 /* Identity mapping of region 5 addresses in HVM. */ #define XEN_IA64_OPTF_IDENT_MAP_REG5 3 #define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) struct xen_ia64_opt_feature { unsigned long cmd; /* Which feature */ unsigned char on; /* Switch feature on/off */ union { struct { /* The page protection bit mask of the pte. * This will be or'ed with the pte. */ unsigned long pgprot; unsigned long key; /* A protection key for itir. */ }; }; }; #endif /* __ASSEMBLY__ */ /* xen perfmon */ #ifdef XEN #ifndef __ASSEMBLY__ #ifndef _ASM_IA64_PERFMON_H #include // asm/perfmon.h requires struct list_head #include // for PFM_xxx and pfarg_features_t, pfarg_context_t, pfarg_reg_t, pfarg_load_t #endif /* _ASM_IA64_PERFMON_H */ DEFINE_XEN_GUEST_HANDLE(pfarg_features_t); DEFINE_XEN_GUEST_HANDLE(pfarg_context_t); DEFINE_XEN_GUEST_HANDLE(pfarg_reg_t); DEFINE_XEN_GUEST_HANDLE(pfarg_load_t); #endif /* __ASSEMBLY__ */ #endif /* XEN */ #ifndef __ASSEMBLY__ #include "arch-ia64/hvm/memmap.h" #endif #endif /* __HYPERVISOR_IF_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/000077500000000000000000000000001314037446600270075ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/debug_op.h000066400000000000000000000061201314037446600307430ustar00rootroot00000000000000/****************************************************************************** * debug_op.h * * Copyright (c) 2007 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_IA64_DEBUG_OP_H__ #define __XEN_PUBLIC_IA64_DEBUG_OP_H__ /* Set/Get extra conditions to break. */ #define XEN_IA64_DEBUG_OP_SET_FLAGS 1 #define XEN_IA64_DEBUG_OP_GET_FLAGS 2 /* Break on kernel single step. */ #define XEN_IA64_DEBUG_ON_KERN_SSTEP (1 << 0) /* Break on kernel debug (breakpoint or watch point). */ #define XEN_IA64_DEBUG_ON_KERN_DEBUG (1 << 1) /* Break on kernel taken branch. */ #define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2) /* Break on interrupt injection. */ #define XEN_IA64_DEBUG_ON_EXTINT (1 << 3) /* Break on interrupt injection. */ #define XEN_IA64_DEBUG_ON_EXCEPT (1 << 4) /* Break on event injection. */ #define XEN_IA64_DEBUG_ON_EVENT (1 << 5) /* Break on privop/virtualized instruction (slow path only). 
*/ #define XEN_IA64_DEBUG_ON_PRIVOP (1 << 6) /* Break on emulated PAL call (at entry). */ #define XEN_IA64_DEBUG_ON_PAL (1 << 7) /* Break on emulated SAL call (at entry). */ #define XEN_IA64_DEBUG_ON_SAL (1 << 8) /* Break on emulated EFI call (at entry). */ #define XEN_IA64_DEBUG_ON_EFI (1 << 9) /* Break on rfi emulation (slow path only, before exec). */ #define XEN_IA64_DEBUG_ON_RFI (1 << 10) /* Break on address translation switch. */ #define XEN_IA64_DEBUG_ON_MMU (1 << 11) /* Break on bad guest physical address. */ #define XEN_IA64_DEBUG_ON_BAD_MPA (1 << 12) /* Force psr.ss bit. */ #define XEN_IA64_DEBUG_FORCE_SS (1 << 13) /* Force psr.db bit. */ #define XEN_IA64_DEBUG_FORCE_DB (1 << 14) /* Break on ITR/PTR. */ #define XEN_IA64_DEBUG_ON_TR (1 << 15) /* Break on ITC/PTC.L/PTC.G/PTC.GA. */ #define XEN_IA64_DEBUG_ON_TC (1 << 16) /* Get translation cache. */ #define XEN_IA64_DEBUG_OP_GET_TC 3 /* Translate virtual address to guest physical address. */ #define XEN_IA64_DEBUG_OP_TRANSLATE 4 union xen_ia64_debug_op { uint64_t flags; struct xen_ia64_debug_vtlb { uint64_t nbr; /* IN/OUT */ XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr; /* IN/OUT */ } vtlb; }; typedef union xen_ia64_debug_op xen_ia64_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t); #endif /* __XEN_PUBLIC_IA64_DEBUG_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/hvm/000077500000000000000000000000001314037446600276015ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/hvm/memmap.h000066400000000000000000000052301314037446600312260ustar00rootroot00000000000000/****************************************************************************** * memmap.h * * Copyright (c) 2008 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ #define __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ #define MEM_G (1UL << 30) #define MEM_M (1UL << 20) #define MEM_K (1UL << 10) /* Guest physical address of IO ports space. 
*/ #define MMIO_START (3 * MEM_G) #define MMIO_SIZE (512 * MEM_M) #define VGA_IO_START 0xA0000UL #define VGA_IO_SIZE 0x20000 #define LEGACY_IO_START (MMIO_START + MMIO_SIZE) #define LEGACY_IO_SIZE (64 * MEM_M) #define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE) #define IO_PAGE_SIZE XEN_PAGE_SIZE #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE) #define STORE_PAGE_SIZE XEN_PAGE_SIZE #define BUFFER_IO_PAGE_START (STORE_PAGE_START + STORE_PAGE_SIZE) #define BUFFER_IO_PAGE_SIZE XEN_PAGE_SIZE #define BUFFER_PIO_PAGE_START (BUFFER_IO_PAGE_START + BUFFER_IO_PAGE_SIZE) #define BUFFER_PIO_PAGE_SIZE XEN_PAGE_SIZE #define IO_SAPIC_START 0xfec00000UL #define IO_SAPIC_SIZE 0x100000 #define PIB_START 0xfee00000UL #define PIB_SIZE 0x200000 #define GFW_START (4 * MEM_G - 16 * MEM_M) #define GFW_SIZE (16 * MEM_M) /* domVTI */ #define GPFN_FRAME_BUFFER 0x1 /* VGA framebuffer */ #define GPFN_LOW_MMIO 0x2 /* Low MMIO range */ #define GPFN_PIB 0x3 /* PIB base */ #define GPFN_IOSAPIC 0x4 /* IOSAPIC base */ #define GPFN_LEGACY_IO 0x5 /* Legacy I/O base */ #define GPFN_HIGH_MMIO 0x6 /* High MMIO range */ /* Nvram belongs to GFW memory space */ #define NVRAM_SIZE (MEM_K * 64) #define NVRAM_START (GFW_START + 10 * MEM_M) #define NVRAM_VALID_SIG 0x4650494e45584948 /* "HIXENIPF" */ struct nvram_save_addr { unsigned long addr; unsigned long signature; }; #endif /* __XEN_PUBLIC_HVM_MEMMAP_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/hvm/save.h000066400000000000000000000130561314037446600307150ustar00rootroot00000000000000/****************************************************************************** * save_types.h * * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_HVM_SAVE_IA64_H__ #define __XEN_PUBLIC_HVM_SAVE_IA64_H__ #include "../../hvm/save.h" #include "../../arch-ia64.h" /* * Save/restore header: general info about the save file. 
*/ /* x86 uses 0x54381286 */ #define HVM_FILE_MAGIC 0x343641492f6e6558UL /* "Xen/IA64" */ #define HVM_FILE_VERSION 0x0000000000000001UL struct hvm_save_header { uint64_t magic; /* Must be HVM_FILE_MAGIC */ uint64_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint64_t cpuid[5]; /* CPUID[0x01][%eax] on the saving machine */ }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * CPU */ struct hvm_hw_ia64_cpu { uint64_t ipsr; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_ia64_cpu); /* * CPU */ struct hvm_hw_ia64_vpd { struct vpd vpd; }; DECLARE_HVM_SAVE_TYPE(VPD, 3, struct hvm_hw_ia64_vpd); /* * device dependency * vacpi => viosapic => vlsapic */ /* * vlsapic */ struct hvm_hw_ia64_vlsapic { uint64_t insvc[4]; uint64_t vhpi; // ??? should this be saved in vpd uint8_t xtp; uint8_t pal_init_pending; uint8_t pad[2]; }; DECLARE_HVM_SAVE_TYPE(VLSAPIC, 4, struct hvm_hw_ia64_vlsapic); /* set * unconditionaly set v->arch.irq_new_peding = 1 * unconditionaly set v->arch.irq_new_condition = 0 */ /* * vtime */ /* itc, itm, itv are saved by arch vcpu context */ struct hvm_hw_ia64_vtime { uint64_t itc; uint64_t itm; uint64_t last_itc; uint64_t pending; }; DECLARE_HVM_SAVE_TYPE(VTIME, 5, struct hvm_hw_ia64_vtime); /* * calculate v->vtm.vtm_offset * ??? Or should vtm_offset be set by leave_hypervisor_tail()? * start vtm_timer if necessary by vtm_set_itm(). * ??? Or should vtm_timer be set by leave_hypervisor_tail()? * * ??? or should be done by schedule_tail() * => schedule_tail() should do. */ /* * viosapic */ #define VIOSAPIC_NUM_PINS 48 /* To share VT-d code which uses vioapic_redir_entry. * Although on ia64 this is for vsapic, but we have to vioapic_redir_entry * instead of viosapic_redir_entry. */ union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode : 3; uint8_t reserve1 : 1; uint8_t delivery_status: 1; uint8_t polarity : 1; uint8_t reserve2 : 1; uint8_t trig_mode : 1; uint8_t mask : 1; uint8_t reserve3 : 7; uint8_t reserved[3]; uint16_t dest_id; } fields; }; struct hvm_hw_ia64_viosapic { uint64_t irr; uint64_t isr; uint32_t ioregsel; uint32_t pad; uint64_t lowest_vcpu_id; uint64_t base_address; union vioapic_redir_entry redirtbl[VIOSAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(VIOSAPIC, 6, struct hvm_hw_ia64_viosapic); /* * vacpi * PM timer */ struct vacpi_regs { union { struct { uint32_t pm1a_sts:16;/* PM1a_EVT_BLK.PM1a_STS: status register */ uint32_t pm1a_en:16; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; uint32_t evt_blk; }; uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ }; struct hvm_hw_ia64_vacpi { struct vacpi_regs regs; }; DECLARE_HVM_SAVE_TYPE(VACPI, 7, struct hvm_hw_ia64_vacpi); /* update last_gtime and setup timer of struct vacpi */ /* * opt_feature: identity mapping of region 4, 5 and 7. * With the c/s 16396:d2935f9c217f of xen-ia64-devel.hg, * opt_feature hypercall supports only region 4,5,7 identity mappings. * structure hvm_hw_ia64_identity_mappings only supports them. * The new structure, struct hvm_hw_ia64_identity_mappings, is created to * avoid to keep up with change of the xen/ia64 internal structure, struct * opt_feature. * * If it is enhanced in the future, new structure will be created. */ struct hvm_hw_ia64_identity_mapping { uint64_t on; /* on/off */ uint64_t pgprot; /* The page protection bit mask of the pte. */ uint64_t key; /* A protection key. 
*/ }; struct hvm_hw_ia64_identity_mappings { struct hvm_hw_ia64_identity_mapping im_reg4;/* Region 4 identity mapping */ struct hvm_hw_ia64_identity_mapping im_reg5;/* Region 5 identity mapping */ struct hvm_hw_ia64_identity_mapping im_reg7;/* Region 7 identity mapping */ }; DECLARE_HVM_SAVE_TYPE(OPT_FEATURE_IDENTITY_MAPPINGS, 8, struct hvm_hw_ia64_identity_mappings); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 8 #endif /* __XEN_PUBLIC_HVM_SAVE_IA64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-ia64/sioemu.h000066400000000000000000000050161314037446600304630ustar00rootroot00000000000000/****************************************************************************** * sioemu.h * * Copyright (c) 2008 Tristan Gingold * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_PUBLIC_IA64_SIOEMU_H__ #define __XEN_PUBLIC_IA64_SIOEMU_H__ /* SIOEMU specific hypercalls. The numbers are the minor part of FW_HYPERCALL_SIOEMU. */ /* Defines the callback entry point. r8=ip, r9=data. Must be called per-vcpu. */ #define SIOEMU_HYPERCALL_SET_CALLBACK 0x01 /* Finish sioemu fw initialization and start firmware. r8=ip. */ #define SIOEMU_HYPERCALL_START_FW 0x02 /* Add IO pages in physmap. */ #define SIOEMU_HYPERCALL_ADD_IO_PHYSMAP 0x03 /* Get wallclock time. */ #define SIOEMU_HYPERCALL_GET_TIME 0x04 /* Flush cache. */ #define SIOEMU_HYPERCALL_FLUSH_CACHE 0x07 /* Get freq base. */ #define SIOEMU_HYPERCALL_FREQ_BASE 0x08 /* Return from callback. */ #define SIOEMU_HYPERCALL_CALLBACK_RETURN 0x09 /* Deliver an interrupt. */ #define SIOEMU_HYPERCALL_DELIVER_INT 0x0a /* SIOEMU callback reason. */ /* An event (from event channel) has to be delivered. */ #define SIOEMU_CB_EVENT 0x00 /* Emulate an IO access. */ #define SIOEMU_CB_IO_EMULATE 0x01 /* An IPI is sent to a dead vcpu. */ #define SIOEMU_CB_WAKEUP_VCPU 0x02 /* A SAL hypercall is executed. */ #define SIOEMU_CB_SAL_ASSIST 0x03 #ifndef __ASSEMBLY__ struct sioemu_callback_info { /* Saved registers. */ unsigned long ip; unsigned long psr; unsigned long ifs; unsigned long nats; unsigned long r8; unsigned long r9; unsigned long r10; unsigned long r11; /* Callback parameters. 
*/ unsigned long cause; unsigned long arg0; unsigned long arg1; unsigned long arg2; unsigned long arg3; unsigned long _pad2[2]; unsigned long r2; }; #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_IA64_SIOEMU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/000077500000000000000000000000001314037446600266715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/cpuid.h000066400000000000000000000050721314037446600301520ustar00rootroot00000000000000/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/hvm/000077500000000000000000000000001314037446600274635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/hvm/save.h000066400000000000000000000247501314037446600306020ustar00rootroot00000000000000/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. 
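 * Each record type below is introduced with DECLARE_HVM_SAVE_TYPE(name,
 * code, type), which ties a save-record type code to the C structure that
 * carries that piece of state.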
* * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. */ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint32_t sysenter_cs; uint32_t padding0; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. 
*/ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { uint8_t data[1024]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. 
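 * For example, INTB# (x = 1) of PCI device 3 is wire-ORed onto link
 * (3 + 1) & 3 = 0, so route[0] holds the GSI that such interrupts are
 * delivered on.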
*/ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * Viridian hypervisor context. */ struct hvm_viridian_context { uint64_t hypercall_gpa; uint64_t guest_os_id; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 15 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/xen-mca.h000066400000000000000000000342111314037446600303730ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetch machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctls * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen frees and never re-uses a certain physical page. * 10. Test facility: Allow Dom0 to write values into machine check MSRs * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; our interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previously-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall another * (most likely) correctable error happened. The fetched data * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecture: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified.
* Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via NMI handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of MSRs with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types; if the action is performed successfully, * the REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* The interface below is used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage scenario: After offlining a broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easily broken page earlier when doing the next reboot.
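 *
 * A hedged usage sketch (only the MC_* / REC_* constants and the structures
 * in this header are real; 'mi' and the helper remember_bad_mfn() are
 * hypothetical): after fetching telemetry into a struct mc_info pointer 'mi',
 * Dom0 could pick out the recovery record with the x86_mcinfo_lookup() macro
 * defined further below and act on an offlined page:
 *
 *     struct mcinfo_common *mic;
 *     struct mcinfo_recovery *rec;
 *     x86_mcinfo_lookup(mic, mi, MC_TYPE_RECOVERY);
 *     rec = (struct mcinfo_recovery *)mic;
 *     if (rec && (rec->action_types & MC_ACTION_PAGE_OFFLINE) &&
 *         (rec->action_flags & REC_ACTION_RECOVERED))
 *         remember_bad_mfn(rec->action_info.page_retire.mfn);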
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t _pad0; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. 
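 *
 * A minimal, illustrative call sequence (the HYPERVISOR_mca() wrapper and the
 * struct mc_info buffer 'mi' are OS-specific assumptions; only the structures
 * and constants defined below are part of this interface):
 *
 *     struct xen_mc mc;
 *     memset(&mc, 0, sizeof(mc));
 *     mc.cmd = XEN_MC_fetch;
 *     mc.interface_version = XEN_MCA_INTERFACE_VERSION;
 *     mc.u.mc_fetch.flags = XEN_MC_NONURGENT;
 *     set_xen_guest_handle(mc.u.mc_fetch.data, mi);
 *     int ret = HYPERVISOR_mca(&mc);
 *
 * On return, mc.u.mc_fetch.flags carries the OUT status (XEN_MC_OK,
 * XEN_MC_FETCHFAILED, XEN_MC_NODATA or XEN_MC_NOMATCH).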
*/ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/xen-x86_32.h000066400000000000000000000145401314037446600305670ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
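 *
 * Illustrative note (not part of the original comment): the Xen/tools-only
 * block below widens every guest handle to 64 bits so that a 32-bit dom0
 * toolstack and a 64-bit hypervisor agree on domctl/sysctl layout, and
 * set_xen_guest_handle() zeroes the full 8 bytes before storing the pointer
 * so the unused upper half never carries stale data. A hedged sketch, with
 * 'bitmap_buf' assumed to be a caller-supplied uint8_t array:
 *
 *     XEN_GUEST_HANDLE_64(uint8) map;
 *     set_xen_guest_handle(map, bitmap_buf);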
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/xen-x86_64.h000066400000000000000000000157351314037446600306030ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to itself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
* All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86/xen.h000066400000000000000000000167641314037446600276520ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #include "../xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86_32.h000066400000000000000000000024131314037446600273460ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/arch-x86_64.h000066400000000000000000000024131314037446600273530ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/callback.h000066400000000000000000000076671314037446600272560ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/dom0_ops.h000066400000000000000000000077061314037446600272340ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/domctl.h000066400000000000000000000612721314037446600267740ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000005 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ #define XEN_DOMCTL_createdomain 1 struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) /* Use hardware-assisted paging if available? */ #define _XEN_DOMCTL_CDF_hap 1 #define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) /* Should domain memory integrity be verifed by tboot during Sx? 
*/ #define _XEN_DOMCTL_CDF_s3_integrity 2 #define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) uint32_t flags; }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_getdomaininfo 5 struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); #define XEN_DOMCTL_getmemlist 6 struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); #define XEN_DOMCTL_getpageframeinfo2 8 struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. 
*/ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ #define XEN_DOMCTL_shadow_op 10 /* Disable shadow mode. */ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); #define XEN_DOMCTL_max_mem 11 struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); #define XEN_DOMCTL_getvcpuinfo 14 struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? 
*/ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. */ #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_getvcpuaffinity 25 struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); #define XEN_DOMCTL_max_vcpus 15 struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); #define XEN_DOMCTL_scheduler_op 16 /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); #define XEN_DOMCTL_setdomainhandle 17 struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); #define XEN_DOMCTL_setdebugging 18 struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); #define XEN_DOMCTL_irq_permission 19 struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); #define XEN_DOMCTL_iomem_permission 20 struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn; /* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); #define XEN_DOMCTL_ioport_permission 21 struct xen_domctl_ioport_permission { uint32_t first_port; /* first port in range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range?
*/ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); #define XEN_DOMCTL_hypercall_init 22 struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); #define XEN_DOMCTL_arch_setup 23 #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) #define _XEN_DOMAINSETUP_sioemu_guest 2 #define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); #define XEN_DOMCTL_settimeoffset 24 struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); #define XEN_DOMCTL_real_mode_area 26 struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 #define XEN_DOMCTL_SENDTRIGGER_POWER 3 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. 
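 *
 * Illustrative note (the packing is a toolstack convention, not defined by
 * this header): machine_bdf below is normally the physical PCI
 * bus/device/function packed as
 *
 *     machine_bdf = (bus << 8) | (device << 3) | function;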
*/ #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_test_assign_device 45 #define XEN_DOMCTL_deassign_device 47 struct xen_domctl_assign_device { uint32_t machine_bdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Retrieve sibling devices infomation of machine_bdf */ #define XEN_DOMCTL_get_device_group 50 struct xen_domctl_get_device_group { uint32_t machine_bdf; /* IN */ uint32_t max_sdevs; /* IN */ uint32_t num_sdevs; /* OUT */ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ }; typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); /* Pass-through interrupts: bind real irq -> hvm devfn. */ #define XEN_DOMCTL_bind_pt_irq 38 #define XEN_DOMCTL_unbind_pt_irq 48 typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA, PT_IRQ_TYPE_MSI, PT_IRQ_TYPE_MSI_TRANSLATE, } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; struct { uint8_t gvec; uint32_t gflags; uint64_aligned_t gtable; } msi; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. */ #define XEN_DOMCTL_memory_mapping 39 #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. */ #define XEN_DOMCTL_ioport_mapping 40 struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ #define XEN_DOMCTL_pin_mem_cacheattr 41 /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. 
SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ #define XEN_DOMCTL_set_opt_feature 44 struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! */ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); /* * Set the target domain for a domain */ #define XEN_DOMCTL_set_target 46 struct xen_domctl_set_target { domid_t target; }; typedef struct xen_domctl_set_target xen_domctl_set_target_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); #if defined(__i386__) || defined(__x86_64__) # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF # define XEN_DOMCTL_set_cpuid 49 struct xen_domctl_cpuid { unsigned int input[2]; unsigned int eax; unsigned int ebx; unsigned int ecx; unsigned int edx; }; typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); #endif #define XEN_DOMCTL_subscribe 29 struct xen_domctl_subscribe { uint32_t port; /* IN */ }; typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); /* * Define the maximum machine address size which should be allocated * to a guest. */ #define XEN_DOMCTL_set_machine_address_size 51 #define XEN_DOMCTL_get_machine_address_size 52 /* * Do not inject spurious page faults into this domain. 
*/ #define XEN_DOMCTL_suppress_spurious_page_faults 53 #define XEN_DOMCTL_debug_op 54 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 struct xen_domctl_debug_op { uint32_t op; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); /* * Request a particular record from the HVM context */ #define XEN_DOMCTL_gethvmcontext_partial 55 typedef struct xen_domctl_hvmcontext_partial { uint32_t type; /* IN: Type of record required */ uint32_t instance; /* IN: Instance of that type */ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ } xen_domctl_hvmcontext_partial_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); struct xen_domctl { uint32_t cmd; uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_get_device_group get_device_group; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; struct xen_domctl_set_target set_target; struct xen_domctl_subscribe subscribe; struct xen_domctl_debug_op debug_op; #if defined(__i386__) || defined(__x86_64__) struct xen_domctl_cpuid cpuid; #endif uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/elfnote.h000066400000000000000000000165031314037446600271430ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). 
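 * (A purely illustrative value, in the format described below, might be
 * "writable_page_tables|auto_translated_physmap|!writable_descriptor_tables".)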
* * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The (non-default) location the initial phys-to-machine map should be * placed at by the hypervisor (Dom0) or the tools (DomU). * The kernel must be prepared for this mapping to be established using * large pages, despite such otherwise not being available to guests. * The kernel must also be able to handle the page table pages used for * this mapping not being accessible through the initial mapping. * (Only x86-64 supports this at present.) */ #define XEN_ELFNOTE_INIT_P2M 15 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_INIT_P2M /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. 
*/ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/event_channel.h000066400000000000000000000213251314037446600303160ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. 
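 * Illustrative guest-side usage (assumes a Linux-style
 * HYPERVISOR_event_channel_op() hypercall wrapper; not itself part of this
 * interface):
 *     struct evtchn_bind_virq bind = { .virq = VIRQ_TIMER, .vcpu = 0 };
 *     rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);
 * On success (rc == 0), bind.port holds the newly allocated local port.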
*/ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. 
*/ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/features.h000066400000000000000000000055751314037446600273340ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. 
In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/grant_table.h000066400000000000000000000406711314037446600277740ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. 
No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry grant_entry_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. 
Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. 
*/ int16_t status; /* GNTST_* */ }; typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. 
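 * (For example, a domain that only needs to read a granted frame would pass
 * GNTMAP_host_map | GNTMAP_readonly in gnttab_map_grant_ref.flags.)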
*/ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/000077500000000000000000000000001314037446600261235ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/e820.h000066400000000000000000000030061314037446600267510ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/hvm_info_table.h000066400000000000000000000047531314037446600312610ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; /* Should firmware build ACPI tables? */ uint8_t acpi_enabled; /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ uint8_t apic_mode; /* How many CPUs does this domain have? */ uint32_t nr_vcpus; /* * MEMORY MAP provided by HVM domain builder. * Notes: * 1. page_to_phys(x) = x << 12 * 2. If a field is zero, the corresponding range does not exist. 
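 * (Worked example of note 1: a low_mem_pgend of 0x3f000 means RAM below
 * 4GB ends at page_to_phys(0x3f000) = 0x3f000000, i.e. at 1008 MiB.)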
*/ /* * 0x0 to page_to_phys(low_mem_pgend)-1: * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) */ uint32_t low_mem_pgend; /* * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: * Reserved for special memory mappings */ uint32_t reserved_mem_pgstart; /* * 0x100000000 to page_to_phys(high_mem_pgend)-1: * RAM above 4GB */ uint32_t high_mem_pgend; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/hvm_op.h000066400000000000000000000112521314037446600275650ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. 
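 * (Tools-only operation; in libxc it is typically exercised through
 * xc_hvm_track_dirty_vram().)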
*/ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/ioreq.h000066400000000000000000000105071314037446600274160ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t pad:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. */ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/params.h000066400000000000000000000076341314037446600275710ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/hvm/save.h000066400000000000000000000063451314037446600272420ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. 
* * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." #endif /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/000077500000000000000000000000001314037446600257405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/blkif.h000066400000000000000000000133701314037446600272040ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. 
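/*
 * Illustrative sketch (not part of the original header): how the
 * DECLARE_HVM_SAVE_TYPE machinery in save.h above ties a record structure to
 * its typecode and length.  "WIDGET", typecode 123 and struct hvm_hw_widget
 * are purely hypothetical; real records are declared in the arch-specific
 * save.h headers included at the bottom of the file.
 */
struct hvm_hw_widget {
    uint64_t state;
};
DECLARE_HVM_SAVE_TYPE(WIDGET, 123, struct hvm_hw_widget);

/*
 * After the declaration, HVM_SAVE_CODE(WIDGET) evaluates to 123 and
 * HVM_SAVE_LENGTH(WIDGET) to sizeof(struct hvm_hw_widget), so a save-format
 * writer might emit the record like this:
 */
static void emit_widget_record(struct hvm_save_descriptor *desc,
                               const struct hvm_hw_widget *src,
                               void *payload /* follows desc in the stream */)
{
    desc->typecode = HVM_SAVE_CODE(WIDGET);
    desc->instance = 0;
    desc->length   = HVM_SAVE_LENGTH(WIDGET);  /* not counting the descriptor */
    *(struct hvm_hw_widget *)payload = *src;
}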
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* * NB. 
first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "sector-size" node in the backend * xenbus info. Also the xenbus "sectors" node is expressed in 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/console.h000066400000000000000000000033341314037446600275560ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
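/*
 * Illustrative sketch (not part of the original header): filling in a
 * single-segment BLKIF_OP_READ request using the blkif.h structures above.
 * Grant allocation and the ring push are assumed to happen elsewhere; the
 * parameter names are hypothetical.
 */
static void fill_read_request(struct blkif_request *req,
                              blkif_vdev_t handle, uint64_t id,
                              blkif_sector_t start_sector, grant_ref_t gref)
{
    req->operation     = BLKIF_OP_READ;
    req->nr_segments   = 1;
    req->handle        = handle;
    req->id            = id;                /* echoed in the response */
    req->sector_number = start_sector;      /* expressed in 512-byte units */

    req->seg[0].gref       = gref;          /* page holding the data buffer */
    req->seg[0].first_sect = 0;             /* first 512-byte sector in page */
    req->seg[0].last_sect  = 7;             /* inclusive: 8 sectors = one 4KiB page */
}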
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/fbif.h000066400000000000000000000127051314037446600270240ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* offset of the framebuffer in bytes */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* * Framebuffer refresh period advice * Backend sends it to advise the frontend their preferred period of * refresh. Frontends that keep the framebuffer constantly up-to-date * just ignore it. Frontends that use the advice should immediately * refresh the framebuffer (and send an update notification event if * those have been requested), then use the update frequency to guide * their periodical refreshs. 
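/*
 * Illustrative sketch (not part of the original header): queueing bytes on
 * the console "out" ring declared in console.h above.  The memory barriers
 * and event-channel notification a real frontend needs are omitted.
 */
static int xencons_queue_output(struct xencons_interface *intf,
                                const char *data, int len)
{
    int sent = 0;
    XENCONS_RING_IDX prod = intf->out_prod;

    /* Indices are free-running; (prod - cons) is the number of used slots. */
    while (sent < len && (prod - intf->out_cons) < sizeof(intf->out)) {
        intf->out[MASK_XENCONS_IDX(prod, intf->out)] = data[sent++];
        prod++;
    }

    intf->out_prod = prod;    /* publish the new bytes to the backend */
    return sent;              /* number of bytes actually queued */
}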
*/ #define XENFB_TYPE_REFRESH_PERIOD 1 #define XENFB_NO_REFRESH 0 struct xenfb_refresh_period { uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */ uint32_t period; /* period of refresh, in ms, * XENFB_NO_REFRESH if no refresh is needed */ }; #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; struct xenfb_refresh_period refresh_period; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs * 64 bit. 256 directories give enough room for a 512 Meg * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. */ unsigned long pd[256]; }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/fsif.h000066400000000000000000000123741314037446600270470ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
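/*
 * Illustrative sketch (not part of the original header): a frontend pushing
 * an XENFB_TYPE_UPDATE out-event through the shared-page macros above.
 * Checking that the backend actually requested updates, and the subsequent
 * event-channel kick, are assumed to happen elsewhere.
 */
static void xenfb_send_update(struct xenfb_page *page,
                              int32_t x, int32_t y,
                              int32_t width, int32_t height)
{
    uint32_t prod = page->out_prod;
    union xenfb_out_event *event;

    if (prod - page->out_cons == XENFB_OUT_RING_LEN)
        return;                       /* ring full; a real driver would wait */

    event = &XENFB_OUT_RING_REF(page, prod);
    event->update.type   = XENFB_TYPE_UPDATE;
    event->update.x      = x;
    event->update.y      = y;
    event->update.width  = width;
    event->update.height = height;

    page->out_prod = prod + 1;        /* publish the event to the backend */
}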
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, . */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_write_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_stat_request { uint32_t fd; }; /* This structure is a copy of some fields from stat structure, returned * via the ring. */ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t stat_ret; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; union { uint64_t ret_val; struct fsif_stat_response fstat; }; }; typedef struct fsif_response fsif_response_t; #define FSIF_RING_ENTRY_SIZE 64 #define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ sizeof(grant_ref_t) + 1) #define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ sizeof(grant_ref_t) + 1) DEFINE_RING_TYPES(fsif, 
struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #define STATE_CLOSING "closing" #define STATE_CLOSED "closed" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/kbdif.h000066400000000000000000000076241314037446600272010ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. 
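/*
 * Illustrative sketch (not part of the original header): unpacking the
 * 64-bit value built from the NR_FILES/ERROR/HAS_MORE masks in fsif.h above,
 * as it would appear in ret_val of a REQ_DIR_LIST response.  The helper name
 * is hypothetical and the packing is inferred from the mask definitions.
 */
static void fsif_decode_list_ret(uint64_t ret_val,
                                 uint32_t *nr_files, uint32_t *error,
                                 int *has_more)
{
    *nr_files = (uint32_t)((ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT);
    *error    = (uint32_t)((ret_val & ERROR_MASK)    >> ERROR_SHIFT);
    *has_more = (ret_val & HAS_MORE_FLAG) != 0;
}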
*/ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/netif.h000066400000000000000000000163401314037446600272220ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. 
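/*
 * Illustrative sketch (not part of the original header): a frontend draining
 * the kbdif in-event ring using the macros above.  handle_event() is a
 * hypothetical callback, and the memory barriers a real driver needs around
 * in_prod/in_cons are omitted.
 */
static void xenkbd_poll(struct xenkbd_page *page,
                        void (*handle_event)(const union xenkbd_in_event *))
{
    uint32_t cons = page->in_cons;
    uint32_t prod = page->in_prod;

    while (cons != prod) {
        handle_event(&XENKBD_IN_RING_REF(page, cons));  /* ignore unknown types */
        cons++;
    }

    page->in_cons = cons;     /* tell the backend these slots may be reused */
}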
*/ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). 
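/*
 * Illustrative sketch (not part of the original header): laying out the first
 * two ring slots of a GSO transmit, following the wire format described in
 * netif.h above.  Slot 0 is an ordinary netif_tx_request with
 * NETTXF_extra_info set; slot 1 occupies the next request slot as a
 * netif_extra_info carrying the GSO parameters.  Grant setup, any further
 * NETTXF_more_data fragments and the ring push are assumed to happen
 * elsewhere; the parameter names are hypothetical.
 */
static void fill_gso_tx_head(struct netif_tx_request *slot0,
                             struct netif_extra_info *slot1,
                             grant_ref_t gref, uint16_t offset,
                             uint16_t id, uint16_t size, uint16_t mss)
{
    slot0->gref   = gref;
    slot0->offset = offset;
    slot0->flags  = NETTXF_extra_info | NETTXF_csum_blank;
    slot0->id     = id;              /* echoed in the tx response */
    slot0->size   = size;            /* packet size in bytes */

    slot1->type           = XEN_NETIF_EXTRA_TYPE_GSO;
    slot1->flags          = 0;       /* no further extra descriptors */
    slot1->u.gso.size     = mss;     /* payload per segment, e.g. the TCP MSS */
    slot1->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
    slot1->u.gso.pad      = 0;
    slot1->u.gso.features = 0;
}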
*/ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). */ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/pciif.h000066400000000000000000000074321314037446600272110ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) #define _XEN_PCIB_AERHANDLER (1) #define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) #define _XEN_PCIB_active (2) #define XEN_PCIB_active (1<<_XEN_PCIB_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) #define XEN_PCI_OP_enable_msi (2) #define XEN_PCI_OP_disable_msi (3) #define XEN_PCI_OP_enable_msix (4) #define XEN_PCI_OP_disable_msix (5) #define XEN_PCI_OP_aer_detected (6) #define XEN_PCI_OP_aer_resume (7) #define XEN_PCI_OP_aer_mmio (8) #define XEN_PCI_OP_aer_slotreset (9) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) /* * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) * Should not exceed 128 */ #define SH_INFO_MAX_VEC 128 struct xen_msix_entry { uint16_t vector; uint16_t entry; }; struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; /* IN: Contains extra infor for this operation */ uint32_t info; /*IN: param for msi-x */ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; }; /*used for pcie aer handling*/ struct xen_pcie_aer_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /*IN/OUT: return aer_op result or carry error_detected state as input*/ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment*/ uint32_t bus; uint32_t devfn; }; struct xen_pci_sharedinfo { /* flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; struct xen_pcie_aer_op aer_op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/protocols.h000066400000000000000000000032101314037446600301310ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
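/*
 * Illustrative sketch (not part of the original header): a pcifront-style
 * client filling in a configuration-space read with struct xen_pci_op above.
 * Locking, the event-channel notification and the wait for completion are
 * only hinted at in comments; the function name is hypothetical.
 */
static void pcifront_start_conf_read(struct xen_pci_sharedinfo *info,
                                     uint32_t domain, uint32_t bus,
                                     uint32_t devfn, int32_t offset,
                                     int32_t size)
{
    struct xen_pci_op *op = &info->op;

    op->cmd    = XEN_PCI_OP_conf_read;
    op->domain = domain;              /* PCI domain/segment */
    op->bus    = bus;
    op->devfn  = devfn;
    op->offset = offset;              /* configuration register to read */
    op->size   = size;                /* access width in bytes */

    info->flags |= XEN_PCIF_active;   /* hand the op to the backend */
    /* ...notify the backend, wait until it clears XEN_PCIF_active,
     * then pick up op->err and op->value. */
}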
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/ring.h000066400000000000000000000350101314037446600270470ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include "../xen-compat.h" #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. 
* * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t pad[48]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). 
*/ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. 
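/*
 * Illustrative sketch (not part of the original header): how a frontend for
 * the "mytag" ring from the DEFINE_RING_TYPES example above might use the
 * hold-off macros defined below.  request_t/response_t are the example
 * message types, notify_remote() stands in for an event-channel kick and
 * process_response() for the completion handler; all barrier subtleties
 * beyond those inside the macros are ignored.
 */
static void mytag_submit(mytag_front_ring_t *ring, const request_t *src,
                         void (*notify_remote)(void))
{
    int notify;

    if (RING_FULL(ring))
        return;                           /* a real driver would back off */

    *RING_GET_REQUEST(ring, ring->req_prod_pvt) = *src;
    ring->req_prod_pvt++;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        notify_remote();                  /* only kick when the backend asked */
}

static void mytag_poll_responses(mytag_front_ring_t *ring,
                                 void (*process_response)(const response_t *))
{
    RING_IDX cons;
    int more;

    do {
        for (cons = ring->rsp_cons; cons != ring->sring->rsp_prod; cons++)
            process_response(RING_GET_RESPONSE(ring, cons));
        ring->rsp_cons = cons;
        RING_FINAL_CHECK_FOR_RESPONSES(ring, more);
    } while (more);
}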
*/ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/tpmif.h000066400000000000000000000046251314037446600272370ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. 
*/ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/usbif.h000066400000000000000000000071541314037446600272300ustar00rootroot00000000000000/* * usbif.h * * USB I/O interface for Xen guest OSes. * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ #define __XEN_PUBLIC_IO_USBIF_H__ #include "ring.h" #include "../grant_table.h" /* * USB pipe in usbif_request * * bits 0-5 are specific bits for virtual USB driver. * bits 7-31 are standard urb pipe. 
* * - port number(NEW): bits 0-4 * (USB_MAXCHILDREN is 31) * * - operation flag(NEW): bit 5 * (0 = submit urb, * 1 = unlink urb) * * - direction: bit 7 * (0 = Host-to-Device [Out] * 1 = Device-to-Host [In]) * * - device address: bits 8-14 * * - endpoint: bits 15-18 * * - pipe type: bits 30-31 * (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) */ #define usbif_pipeportnum(pipe) ((pipe) & 0x1f) #define usbif_setportnum_pipe(pipe,portnum) \ ((pipe)|(portnum)) #define usbif_pipeunlink(pipe) ((pipe) & 0x20) #define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) #define USBIF_BACK_MAX_PENDING_REQS (128) #define USBIF_MAX_SEGMENTS_PER_REQUEST (10) struct usbif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; struct usbif_request { uint16_t id; /* request id */ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ /* basic urb parameter */ uint32_t pipe; uint16_t transfer_flags; uint16_t buffer_length; union { uint8_t ctrl[8]; /* setup_packet (Ctrl) */ struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t start_frame; /* start frame */ uint16_t number_of_packets; /* number of ISO packet */ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ } isoc; struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t pad[3]; } intr; struct { uint16_t unlink_id; /* unlink request id */ uint16_t pad[3]; } unlink; } u; /* urb data segments */ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_request usbif_request_t; struct usbif_response { uint16_t id; /* request id */ uint16_t start_frame; /* start frame (ISO) */ int32_t status; /* status (non-ISO) */ int32_t actual_length; /* actual transfer length */ int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_response usbif_response_t; DEFINE_RING_TYPES(usbif, struct usbif_request, struct usbif_response); #define USB_RING_SIZE __RING_SIZE((struct usbif_sring *)0, PAGE_SIZE) #endif /* __XEN_PUBLIC_IO_USBIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/vscsiif.h000066400000000000000000000070421314037446600275620ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. 
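/*
 * Illustrative sketch (not part of the original header): decoding the
 * remaining usbif pipe fields according to the bit layout documented above.
 * Only usbif_pipeportnum()/usbif_pipeunlink() come from the header; the
 * helpers below are hypothetical additions for illustration.
 */
#define usbif_pipedevice(pipe)    (((pipe) >>  8) & 0x7f)  /* bits 8-14      */
#define usbif_pipeendpoint(pipe)  (((pipe) >> 15) & 0x0f)  /* bits 15-18     */
#define usbif_pipein(pipe)        (((pipe) >>  7) & 0x01)  /* bit 7: 1 = In  */
#define usbif_pipetype(pipe)      (((pipe) >> 30) & 0x03)  /* 00 iso, 01 int,
                                                            * 10 ctrl, 11 bulk */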
*/ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include "ring.h" #include "../grant_table.h" /* command between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_BACK_MAX_PENDING_REQS 128 /* * Maximum scatter/gather segments per request. * * Considering balance between allocating al least 16 "vscsiif_request" * structures on one page (4096bytes) and number of scatter gather * needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* * base on linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; } seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; }; typedef struct vscsiif_request vscsiif_request_t; struct vscsiif_response { uint16_t rqid; uint8_t padding; uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/xenbus.h000066400000000000000000000046401314037446600274210ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. 
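/*
 * Illustrative sketch (not part of the original header): filling in a
 * single-segment VSCSIIF_ACT_SCSI_CDB request using the vscsiif.h structure
 * above.  The CDB bytes, grant reference and ring handling come from the
 * (hypothetical) caller.
 */
static void fill_scsi_cdb_request(struct vscsiif_request *req, uint16_t rqid,
                                  const uint8_t *cdb, uint8_t cdb_len,
                                  uint16_t channel, uint16_t id, uint16_t lun,
                                  uint8_t data_direction, uint16_t timeout,
                                  grant_ref_t gref, uint16_t offset,
                                  uint16_t length)
{
    uint8_t i;

    req->rqid    = rqid;                        /* echoed in the response */
    req->act     = VSCSIIF_ACT_SCSI_CDB;
    req->cmd_len = cdb_len;                     /* <= VSCSIIF_MAX_COMMAND_SIZE */
    for (i = 0; i < cdb_len && i < VSCSIIF_MAX_COMMAND_SIZE; i++)
        req->cmnd[i] = cdb[i];

    req->timeout_per_command = timeout;         /* backend issues with twice this */
    req->channel = channel;
    req->id      = id;
    req->lun     = lun;
    req->sc_data_direction = data_direction;    /* 1=to dev, 2=from dev, 3=none */

    req->nr_segments   = 1;                     /* <= VSCSIIF_SG_TABLESIZE */
    req->seg[0].gref   = gref;
    req->seg[0].offset = offset;
    req->seg[0].length = length;
}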
States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/io/xs_wire.h000066400000000000000000000067671314037446600276110ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). 
*/ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/kexec.h000066400000000000000000000144501314037446600266050ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. * * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. 
*/ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ #define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap * Note that although this is adjacent * to Xen it exists in a separate EFI * region on ia64, and thus needs to be * inserted into iomem_machine separately */ #define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of * the ia64_boot_param */ #define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of * the EFI Memory Map */ #define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_...
[in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/memory.h000066400000000000000000000232611314037446600270160ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. */ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. 
*/ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input extents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table.
Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/nmi.h000066400000000000000000000052641314037446600262740ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/physdev.h000066400000000000000000000170731314037446600271740ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. 
*/ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. */ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/platform.h000066400000000000000000000301251314037446600273270ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC, * 1 January, 1970 if the current system time was <system_time>. */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data.
*/ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. */ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. 
*/ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ }; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct 
xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/sched.h000066400000000000000000000104321314037446600265700ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. 
* @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/sysctl.h000066400000000000000000000403361314037446600270310ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000006 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. */ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. 
*/ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 /* (x86) The platform supports HVM guests. */ #define _XEN_SYSCTL_PHYSCAP_hvm 0 #define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) /* (x86) The platform supports HVM-guest direct access to I/O devices. */ #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 #define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; uint32_t nr_nodes; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; /* * IN: maximum addressable entry in the caller-provided cpu_to_node array. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the cpu_to_node array is truncated! */ uint32_t max_cpu_id; /* * If not NULL, this array is filled with node identifier for each cpu. * If a cpu has no node information (e.g., cpu not present) then the * sentinel value ~0u is written. * The size of this array is specified by the caller in @max_cpu_id. * If the actual @max_cpu_id is smaller than the array then the trailing * elements of the array will not be written by the sysctl. */ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; /* XEN_SYSCTL_PHYSCAP_??? */ uint32_t capabilities; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. 
*/ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ #define XEN_SYSCTL_getcpuinfo 8 struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); #define XEN_SYSCTL_availheap 9 struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); #define XEN_SYSCTL_get_pmstat 10 struct pm_px_val { uint64_aligned_t freq; /* Px core frequency */ uint64_aligned_t residency; /* Px residency time */ uint64_aligned_t count; /* Px transition count */ }; typedef struct pm_px_val pm_px_val_t; DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); struct pm_px_stat { uint8_t total; /* total Px states */ uint8_t usable; /* usable Px states */ uint8_t last; /* last Px state */ uint8_t cur; /* current Px state */ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; }; typedef struct pm_px_stat pm_px_stat_t; DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); struct pm_cx_stat { uint32_t nr; /* entry nr in triggers & residencies, including C0 */ uint32_t last; /* last Cx state */ uint64_aligned_t idle_time; /* idle time from boot */ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ }; struct xen_sysctl_get_pmstat { #define PMSTAT_CATEGORY_MASK 0xf0 #define PMSTAT_PX 0x10 #define PMSTAT_CX 0x20 #define PMSTAT_get_max_px (PMSTAT_PX | 0x1) #define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) #define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) #define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) #define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) #define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) uint32_t type; uint32_t cpuid; union { struct pm_px_stat getpx; struct pm_cx_stat getcx; /* other struct for tx, etc */ } u; }; typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); /* * Status codes. Must be greater than 0 to avoid confusing * sysctl callers that see 0 as a plain successful return. */ #define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1 #define XEN_CPU_HOTPLUG_STATUS_ONLINE 2 #define XEN_CPU_HOTPLUG_STATUS_NEW 3 #define XEN_SYSCTL_cpu_hotplug 11 struct xen_sysctl_cpu_hotplug { /* IN variables */ uint32_t cpu; /* Physical cpu. */ #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 #define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2 uint32_t op; /* hotplug opcode */ }; typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); /* * Get/set xen power management, include * 1. 
cpufreq governors and related parameters */ #define XEN_SYSCTL_pm_op 12 struct xen_userspace { uint32_t scaling_setspeed; }; typedef struct xen_userspace xen_userspace_t; struct xen_ondemand { uint32_t sampling_rate_max; uint32_t sampling_rate_min; uint32_t sampling_rate; uint32_t up_threshold; }; typedef struct xen_ondemand xen_ondemand_t; /* * cpufreq para name of this structure named * same as sysfs file name of native linux */ #define CPUFREQ_NAME_LEN 16 struct xen_get_cpufreq_para { /* IN/OUT variable */ uint32_t cpu_num; uint32_t freq_num; uint32_t gov_num; /* for all governors */ /* OUT variable */ XEN_GUEST_HANDLE_64(uint32) affected_cpus; XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; XEN_GUEST_HANDLE_64(char) scaling_available_governors; char scaling_driver[CPUFREQ_NAME_LEN]; uint32_t cpuinfo_cur_freq; uint32_t cpuinfo_max_freq; uint32_t cpuinfo_min_freq; uint32_t scaling_cur_freq; char scaling_governor[CPUFREQ_NAME_LEN]; uint32_t scaling_max_freq; uint32_t scaling_min_freq; /* for specific governor */ union { struct xen_userspace userspace; struct xen_ondemand ondemand; } u; }; struct xen_set_cpufreq_gov { char scaling_governor[CPUFREQ_NAME_LEN]; }; struct xen_set_cpufreq_para { #define SCALING_MAX_FREQ 1 #define SCALING_MIN_FREQ 2 #define SCALING_SETSPEED 3 #define SAMPLING_RATE 4 #define UP_THRESHOLD 5 uint32_t ctrl_type; uint32_t ctrl_value; }; /* Get physical CPU topology information. */ #define INVALID_TOPOLOGY_ID (~0U) struct xen_get_cputopo { /* IN: maximum addressable entry in * the caller-provided cpu_to_core/socket. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(uint32) cpu_to_core; XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; /* OUT: number of cpus returned * If OUT is greater than IN then the cpu_to_core/socket is truncated! */ uint32_t nr_cpus; }; struct xen_sysctl_pm_op { #define PM_PARA_CATEGORY_MASK 0xf0 #define CPUFREQ_PARA 0x10 /* cpufreq command type */ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04) /* get CPU topology */ #define XEN_SYSCTL_pm_op_get_cputopo 0x20 /* set/reset scheduler power saving option */ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21 /* cpuidle max_cstate access command */ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22 #define XEN_SYSCTL_pm_op_set_max_cstate 0x23 /* set scheduler migration cost value */ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24 #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25 uint32_t cmd; uint32_t cpuid; union { struct xen_get_cpufreq_para get_para; struct xen_set_cpufreq_gov set_gov; struct xen_set_cpufreq_para set_para; uint64_t get_avgfreq; struct xen_get_cputopo get_topo; uint32_t set_sched_opt_smt; uint32_t get_max_cstate; uint32_t set_max_cstate; uint32_t get_vcpu_migration_delay; uint32_t set_vcpu_migration_delay; }; }; #define XEN_SYSCTL_page_offline_op 14 struct xen_sysctl_page_offline_op { /* IN: range of page to be offlined */ #define sysctl_page_offline 1 #define sysctl_page_online 2 #define sysctl_query_page_offline 3 uint32_t cmd; uint32_t start; uint32_t end; /* OUT: result of page offline request */ /* * bit 0~15: result flags * bit 16~31: owner */ XEN_GUEST_HANDLE(uint32) status; }; #define PG_OFFLINE_STATUS_MASK (0xFFUL) /* The result is invalid, i.e. 
HV does not handle it */ #define PG_OFFLINE_INVALID (0x1UL << 0) #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) #define PG_OFFLINE_FAILED (0x1UL << 3) #define PG_ONLINE_FAILED PG_OFFLINE_FAILED #define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED #define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) #define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) #define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) #define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) #define PG_OFFLINE_MISC_MASK (0xFFUL << 4) /* only valid when PG_OFFLINE_FAILED */ #define PG_OFFLINE_XENPAGE (0x1UL << 8) #define PG_OFFLINE_DOM0PAGE (0x1UL << 9) #define PG_OFFLINE_ANONYMOUS (0x1UL << 10) #define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) #define PG_OFFLINE_OWNED (0x1UL << 12) #define PG_OFFLINE_BROKEN (0x1UL << 13) #define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN #define PG_OFFLINE_OWNER_SHIFT 16 struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; struct xen_sysctl_get_pmstat get_pmstat; struct xen_sysctl_cpu_hotplug cpu_hotplug; struct xen_sysctl_pm_op pm_op; struct xen_sysctl_page_offline_op page_offline; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/trace.h000066400000000000000000000225471314037446600266120ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ #define TRC_PM 0x0080f000 /* Xen power management trace */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) #define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) #define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1) #define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2) #define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2) #define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3) #define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4) #define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5) #define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7) #define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9) #define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10) #define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) #define TRC_PV_PAGE_FAULT (TRC_PV + 4) #define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) #define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) #define TRC_PV_EMULATE_4GB (TRC_PV + 7) #define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) #define TRC_PV_PAGING_FIXUP (TRC_PV + 9) #define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) #define TRC_PV_PTWR_EMULATION (TRC_PV + 11) #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) /* Indicates that addresses in trace record are 64 bits */ #define TRC_64_FLAG (0x100) #define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1) #define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2) #define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3) #define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4) #define TRC_SHADOW_MMIO (TRC_SHADOW + 5) #define TRC_SHADOW_FIXUP (TRC_SHADOW + 6) #define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7) #define TRC_SHADOW_EMULATE (TRC_SHADOW + 8) #define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9) #define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10) #define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11) #define 
TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12) #define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13) #define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14) #define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) #define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14) #define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) #define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16) #define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17) #define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) #define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20) #define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) #define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) /* trace subclasses for power management */ #define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */ #define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */ /* trace events for per class */ #define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01) #define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01) #define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02) /* This structure represents a single trace buffer record. */ struct t_rec { uint32_t event:28; uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ union { struct { uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ uint32_t extra_u32[7]; /* event data items */ } cycles; struct { uint32_t extra_u32[7]; /* event data items */ } nocycles; } u; }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { /* Assume the data buffer size is X. X is generally not a power of 2. * CONS and PROD are incremented modulo (2*X): * 0 <= cons < 2*X * 0 <= prod < 2*X * This is done because addition modulo X breaks at 2^32 when X is not a * power of 2: * (((2^32 - 1) % X) + 1) % X != (2^32) % X */ uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. */ /* Records follow immediately after the meta-data header. 
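 *
 * An illustrative (non-normative) consumer sketch, assuming the record data
 * area begins immediately after this header and is X bytes long, and that
 * "data" points at it; the record-size arithmetic below is an assumption
 * based on the struct t_rec layout above, not part of the interface:
 *
 *   while (buf->cons != buf->prod) {
 *       struct t_rec *rec = (struct t_rec *)(data + (buf->cons % X));
 *       unsigned rec_size = sizeof(uint32_t)                          /. header word ./
 *                         + (rec->cycles_included ? 2 * sizeof(uint32_t) : 0)
 *                         + rec->extra_u32 * sizeof(uint32_t);
 *       ... consume rec ...
 *       buf->cons = (buf->cons + rec_size) % (2 * X);
 *   }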
*/ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/vcpu.h000066400000000000000000000202501314037446600264560ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. 
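 * (Illustrative use, not mandated by this interface: a guest may fold
 * time[RUNSTATE_runnable] + time[RUNSTATE_offline] into its accounting as
 * "stolen" time, and time[RUNSTATE_blocked] as idle time.)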
*/ uint64_t time[4]; }; typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. 
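 *
 * An illustrative (non-normative) registration sketch, assuming the guest
 * keeps a per-cpu vcpu_info structure "info" and has the usual
 * HYPERVISOR_vcpu_op() wrapper plus Linux-style virt_to_mfn()/PAGE_MASK
 * helpers (all of which are assumptions, not defined here):
 *
 *   struct vcpu_register_vcpu_info a;
 *   a.mfn    = virt_to_mfn(&info);                      ... page holding info
 *   a.offset = (unsigned long)&info & ~PAGE_MASK;       ... offset in that page
 *   a.rsvd   = 0;
 *   rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &a);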
*/ #define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 /* * Get the physical ID information for a pinned vcpu's underlying physical * processor. The physical ID informmation is architecture-specific. * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and * greater are reserved. * This command returns -EINVAL if it is not a valid operation for this VCPU. */ #define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ struct vcpu_get_physid { uint64_t phys_id; }; typedef struct vcpu_get_physid vcpu_get_physid_t; DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); #define xen_vcpu_physid_to_x86_apicid(physid) \ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) #define xen_vcpu_physid_to_x86_acpiid(physid) \ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32))) #endif /* __XEN_PUBLIC_VCPU_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/version.h000066400000000000000000000057531314037446600272010ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. 
*/ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xen-compat.h000066400000000000000000000036361314037446600275650ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." 
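/*
 * Illustrative (non-normative) notes on the version machinery above. A guest
 * built against an older calling convention can pin it by defining
 * __XEN_INTERFACE_VERSION__ before any public header is included, e.g.
 * (hypothetical build snippet):
 *
 *   #define __XEN_INTERFACE_VERSION__ 0x00030205
 *   #include "xen.h"
 *
 * Leaving it undefined selects the legacy (0x00000000) interface, as stated
 * above. The running hypervisor can be queried with the XENVER_* commands
 * declared earlier, assuming the usual hypercall wrapper exists in the guest:
 *
 *   int ver = HYPERVISOR_xen_version(XENVER_version, NULL);
 *   int major = ver >> 16, minor = ver & 0xffff;   (major:minor packed 16:16)
 */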
#endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xen.h000066400000000000000000000623601314037446600263030ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 /* Architecture-specific 
hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. 
*/ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). 
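 *
 * An illustrative (non-normative) example, assuming the usual hypercall
 * wrapper: a PV guest that wants Xen to validate its direct writes to
 * bottom-level PTEs issues
 *
 *   HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
 *
 * early during boot; VMASST_CMD_disable reverses the assist.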
*/ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. 
*/ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. 
If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. 
*/ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xencomm.h000066400000000000000000000032131314037446600271470ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xenoprof.h000066400000000000000000000100311314037446600273350ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. * Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). 
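 * A plausible lifecycle for the primary profiling domain, inferred from the
 * command names below (illustrative only, not a normative ordering):
 * XENOPROF_init, XENOPROF_get_buffer, XENOPROF_set_active,
 * XENOPROF_reserve_counters, XENOPROF_counter (once per counter),
 * XENOPROF_setup_events, XENOPROF_enable_virq, XENOPROF_start ...
 * XENOPROF_stop, XENOPROF_disable_virq, XENOPROF_release_counters,
 * XENOPROF_shutdown.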
*/ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 #define XENOPROF_last_op 15 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE ~0UL /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xsm/000077500000000000000000000000001314037446600261405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xsm/acm.h000066400000000000000000000156251314037446600270620ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 4 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 #define ACMHOOK_conflictset 3 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. 
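 *
 * Illustrative example (not part of the interface): if types A and B belong
 * to a common conflict set, conflict_sets[A][B] is "1"; while a domain whose
 * ssidref includes type A is running, conflict_aggregate_set[B] is non-zero,
 * so starting a domain whose ssidref includes type B is denied until the
 * type-A domain is destroyed and the aggregate count drops back to zero.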
*/ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* size of the SHA1 hash identifying the XML policy from which the binary policy was created */ #define ACM_SHA1_HASH_SIZE 20 /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xsm/acm_ops.h000066400000000000000000000104741314037446600277400ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/public/xsm/flask_op.h000066400000000000000000000023531314037446600301120ustar00rootroot00000000000000/* * This file contains the flask_op hypercall commands and definitions. * * Author: George Coker, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. 
*/ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 #define FLASK_LAST FLASK_MEMBER typedef struct flask_op { uint32_t cmd; uint32_t size; char *buf; } flask_op_t; DEFINE_XEN_GUEST_HANDLE(flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/sched.h000066400000000000000000000104321314037446600253120ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. 
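 * A rough caller-side sketch (illustrative only, assuming the usual
 * long HYPERVISOR_sched_op(int cmd, void *arg) guest wrapper for the
 * hypercall documented at the top of this file, and the struct
 * sched_poll defined just below):
 *     evtchn_port_t port = my_port;                 -- illustrative
 *     struct sched_poll poll = { .nr_ports = 1, .timeout = 0 };
 *     set_xen_guest_handle(poll.ports, &port);
 *     (void)HYPERVISOR_sched_op(SCHEDOP_poll, &poll);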
Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/sysctl.h000066400000000000000000000366751314037446600255660ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000006 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. 
*/ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. */ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 /* (x86) The platform supports HVM guests. */ #define _XEN_SYSCTL_PHYSCAP_hvm 0 #define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) /* (x86) The platform supports HVM-guest direct access to I/O devices. */ #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 #define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; uint32_t nr_nodes; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; /* * IN: maximum addressable entry in the caller-provided cpu_to_node array. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the cpu_to_node array is truncated! */ uint32_t max_cpu_id; /* * If not NULL, this array is filled with node identifier for each cpu. * If a cpu has no node information (e.g., cpu not present) then the * sentinel value ~0u is written. * The size of this array is specified by the caller in @max_cpu_id. * If the actual @max_cpu_id is smaller than the array then the trailing * elements of the array will not be written by the sysctl. */ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; /* XEN_SYSCTL_PHYSCAP_??? */ uint32_t capabilities; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. 
*/ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ #define XEN_SYSCTL_getcpuinfo 8 struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); #define XEN_SYSCTL_availheap 9 struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
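 * A rough caller-side sketch (tools side only; 'do_sysctl' stands for
 * whatever wrapper the toolstack uses to issue the sysctl hypercall,
 * and the enclosing struct xen_sysctl is defined at the end of this
 * file):
 *     struct xen_sysctl op = { 0 };
 *     op.cmd = XEN_SYSCTL_availheap;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     op.u.availheap.min_bitwidth = 0;   -- don't care
 *     op.u.availheap.max_bitwidth = 0;   -- don't care
 *     op.u.availheap.node = -1;          -- all nodes
 *     do_sysctl(&op);                    -- avail_bytes then holds the result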
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); #define XEN_SYSCTL_get_pmstat 10 struct pm_px_val { uint64_aligned_t freq; /* Px core frequency */ uint64_aligned_t residency; /* Px residency time */ uint64_aligned_t count; /* Px transition count */ }; typedef struct pm_px_val pm_px_val_t; DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); struct pm_px_stat { uint8_t total; /* total Px states */ uint8_t usable; /* usable Px states */ uint8_t last; /* last Px state */ uint8_t cur; /* current Px state */ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; }; typedef struct pm_px_stat pm_px_stat_t; DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); struct pm_cx_stat { uint32_t nr; /* entry nr in triggers & residencies, including C0 */ uint32_t last; /* last Cx state */ uint64_aligned_t idle_time; /* idle time from boot */ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ }; struct xen_sysctl_get_pmstat { #define PMSTAT_CATEGORY_MASK 0xf0 #define PMSTAT_PX 0x10 #define PMSTAT_CX 0x20 #define PMSTAT_get_max_px (PMSTAT_PX | 0x1) #define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) #define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) #define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) #define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) #define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) uint32_t type; uint32_t cpuid; union { struct pm_px_stat getpx; struct pm_cx_stat getcx; /* other struct for tx, etc */ } u; }; typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); /* * Status codes. Must be greater than 0 to avoid confusing * sysctl callers that see 0 as a plain successful return. */ #define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1 #define XEN_CPU_HOTPLUG_STATUS_ONLINE 2 #define XEN_CPU_HOTPLUG_STATUS_NEW 3 #define XEN_SYSCTL_cpu_hotplug 11 struct xen_sysctl_cpu_hotplug { /* IN variables */ uint32_t cpu; /* Physical cpu. */ #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 #define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2 uint32_t op; /* hotplug opcode */ }; typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); /* * Get/set xen power management, include * 1. 
cpufreq governors and related parameters */ #define XEN_SYSCTL_pm_op 12 struct xen_userspace { uint32_t scaling_setspeed; }; typedef struct xen_userspace xen_userspace_t; struct xen_ondemand { uint32_t sampling_rate_max; uint32_t sampling_rate_min; uint32_t sampling_rate; uint32_t up_threshold; }; typedef struct xen_ondemand xen_ondemand_t; /* * cpufreq para name of this structure named * same as sysfs file name of native linux */ #define CPUFREQ_NAME_LEN 16 struct xen_get_cpufreq_para { /* IN/OUT variable */ uint32_t cpu_num; uint32_t freq_num; uint32_t gov_num; /* for all governors */ /* OUT variable */ XEN_GUEST_HANDLE_64(uint32) affected_cpus; XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; XEN_GUEST_HANDLE_64(char) scaling_available_governors; char scaling_driver[CPUFREQ_NAME_LEN]; uint32_t cpuinfo_cur_freq; uint32_t cpuinfo_max_freq; uint32_t cpuinfo_min_freq; uint32_t scaling_cur_freq; char scaling_governor[CPUFREQ_NAME_LEN]; uint32_t scaling_max_freq; uint32_t scaling_min_freq; /* for specific governor */ union { struct xen_userspace userspace; struct xen_ondemand ondemand; } u; }; struct xen_set_cpufreq_gov { char scaling_governor[CPUFREQ_NAME_LEN]; }; struct xen_set_cpufreq_para { #define SCALING_MAX_FREQ 1 #define SCALING_MIN_FREQ 2 #define SCALING_SETSPEED 3 #define SAMPLING_RATE 4 #define UP_THRESHOLD 5 uint32_t ctrl_type; uint32_t ctrl_value; } ; /* Get physical CPU topology information. */ #define INVALID_TOPOLOGY_ID (~0U) struct xen_get_cputopo { /* IN: maximum addressable entry in * the caller-provided cpu_to_core/socket. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(uint32) cpu_to_core; XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; /* OUT: number of cpus returned * If OUT is greater than IN then the cpu_to_core/socket is truncated! */ uint32_t nr_cpus; }; struct xen_sysctl_pm_op { #define PM_PARA_CATEGORY_MASK 0xf0 #define CPUFREQ_PARA 0x10 /* cpufreq command type */ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) /* get CPU topology */ #define XEN_SYSCTL_pm_op_get_cputopo 0x20 uint32_t cmd; uint32_t cpuid; union { struct xen_get_cpufreq_para get_para; struct xen_set_cpufreq_gov set_gov; struct xen_set_cpufreq_para set_para; struct xen_get_cputopo get_topo; }; }; #define XEN_SYSCTL_page_offline_op 14 struct xen_sysctl_page_offline_op { /* IN: range of page to be offlined */ #define sysctl_page_offline 1 #define sysctl_page_online 2 #define sysctl_query_page_offline 3 uint32_t cmd; uint32_t start; uint32_t end; /* OUT: result of page offline request */ /* * bit 0~15: result flags * bit 16~31: owner */ XEN_GUEST_HANDLE(uint32) status; }; #define PG_OFFLINE_STATUS_MASK (0xFFUL) /* The result is invalid, i.e. 
HV does not handle it */ #define PG_OFFLINE_INVALID (0x1UL << 0) #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) #define PG_OFFLINE_FAILED (0x1UL << 3) #define PG_ONLINE_FAILED PG_OFFLINE_FAILED #define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED #define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) #define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) #define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) #define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) #define PG_OFFLINE_MISC_MASK (0xFFUL << 4) /* only valid when PG_OFFLINE_FAILED */ #define PG_OFFLINE_XENPAGE (0x1UL << 8) #define PG_OFFLINE_DOM0PAGE (0x1UL << 9) #define PG_OFFLINE_ANONYMOUS (0x1UL << 10) #define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) #define PG_OFFLINE_OWNED (0x1UL << 12) #define PG_OFFLINE_BROKEN (0x1UL << 13) #define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN #define PG_OFFLINE_OWNER_SHIFT 16 struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; struct xen_sysctl_get_pmstat get_pmstat; struct xen_sysctl_cpu_hotplug cpu_hotplug; struct xen_sysctl_pm_op pm_op; struct xen_sysctl_page_offline_op page_offline; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/trace.h000066400000000000000000000226011314037446600253230ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ #define TRC_PM 0x0080f000 /* Xen power management trace */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) #define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) #define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1) #define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2) #define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2) #define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3) #define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4) #define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5) #define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7) #define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9) #define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10) #define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) #define TRC_PV_PAGE_FAULT (TRC_PV + 4) #define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) #define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) #define TRC_PV_EMULATE_4GB (TRC_PV + 7) #define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) #define TRC_PV_PAGING_FIXUP (TRC_PV + 9) #define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) #define TRC_PV_PTWR_EMULATION (TRC_PV + 11) #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) /* Indicates that addresses in trace record are 64 bits */ #define TRC_64_FLAG (0x100) #define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1) #define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2) #define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3) #define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4) #define TRC_SHADOW_MMIO (TRC_SHADOW + 5) #define TRC_SHADOW_FIXUP (TRC_SHADOW + 6) #define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7) #define TRC_SHADOW_EMULATE (TRC_SHADOW + 8) #define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9) #define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10) #define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11) #define 
TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12) #define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13) #define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14) #define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) #define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14) #define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) #define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16) #define TRC_HVM_IO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x16) #define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17) #define TRC_HVM_MMIO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x17) #define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) #define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0X20) /* trace subclasses for power management */ #define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */ #define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */ /* trace events for per class */ #define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01) #define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01) #define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02) /* This structure represents a single trace buffer record. */ struct t_rec { uint32_t event:28; uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ union { struct { uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ uint32_t extra_u32[7]; /* event data items */ } cycles; struct { uint32_t extra_u32[7]; /* event data items */ } nocycles; } u; }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { /* Assume the data buffer size is X. X is generally not a power of 2. * CONS and PROD are incremented modulo (2*X): * 0 <= cons < 2*X * 0 <= prod < 2*X * This is done because addition modulo X breaks at 2^32 when X is not a * power of 2: * (((2^32 - 1) % X) + 1) % X != (2^32) % X */ uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. 
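 * A rough consumer-side sketch of the modulo-(2*X) arithmetic described
 * above (X is the number of records that fit in the data area; names
 * are illustrative only):
 *     avail = (prod >= cons) ? (prod - cons) : (prod + 2*X - cons);
 *     idx   = (cons < X) ? cons : (cons - X);   -- index into the t_rec array
 *     cons  = (cons + 1) % (2*X);               -- advance after consuming one record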
*/ /* Records follow immediately after the meta-data header. */ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/vcpu.h000066400000000000000000000202501314037446600252000ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). 
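 * (The array is indexed by the RUNSTATE_* values defined below; for
 * example, time[RUNSTATE_runnable] is a common basis for a guest's
 * "stolen time" accounting.)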
The sum of these times is * guaranteed not to drift from system time. */ uint64_t time[4]; }; typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. 
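 * A rough caller-side sketch (illustrative only, assuming the usual
 * HYPERVISOR_vcpu_op(cmd, vcpuid, extra_args) guest wrapper for the
 * hypercall documented at the top of this file; virt_to_mfn() and
 * offset_in_page() stand for whatever the guest uses to derive the
 * frame number and page offset of its per-cpu vcpu_info copy):
 *     struct vcpu_register_vcpu_info info = {
 *         .mfn    = virt_to_mfn(&per_cpu_vcpu_info),
 *         .offset = offset_in_page(&per_cpu_vcpu_info),
 *     };
 *     rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);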
*/ #define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 /* * Get the physical ID information for a pinned vcpu's underlying physical * processor. The physical ID informmation is architecture-specific. * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and * greater are reserved. * This command returns -EINVAL if it is not a valid operation for this VCPU. */ #define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ struct vcpu_get_physid { uint64_t phys_id; }; typedef struct vcpu_get_physid vcpu_get_physid_t; DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); #define xen_vcpu_physid_to_x86_apicid(physid) \ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) #define xen_vcpu_physid_to_x86_acpiid(physid) \ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32))) #endif /* __XEN_PUBLIC_VCPU_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/version.h000066400000000000000000000057531314037446600257230ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. 
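 * (All of these ops are issued through the xen_version hypercall; e.g.,
 * assuming the usual HYPERVISOR_xen_version(int cmd, void *arg) guest
 * wrapper, purely for illustration:
 *     xen_extraversion_t extra;
 *     ver = HYPERVISOR_xen_version(XENVER_version, NULL);   -- major:minor
 *     HYPERVISOR_xen_version(XENVER_extraversion, extra);
 * where major = ver >> 16 and minor = ver & 0xffff.)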
*/ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xen-compat.h000066400000000000000000000036361314037446600263070ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." 
#endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xen.h000066400000000000000000000623441314037446600250270ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 /* Architecture-specific hypercall 
definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. 
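 * A rough caller-side sketch of a single normal PT update (illustrative
 * only, assuming the usual HYPERVISOR_mmu_update(req, count, done, dom)
 * guest wrapper and the struct mmu_update defined later in this file):
 *     struct mmu_update u = {
 *         .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
 *         .val = new_pte_value,
 *     };
 *     rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);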
*/ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(void) vcpumask; #else void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). 
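 * For example, a PV guest may turn on writable page-table emulation
 * with (wrapper name assumed from the guest's hypercall layer):
 *     HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);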
*/ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. 
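 *
 * Illustrative sketch of the seqlock-style sampling loop described at the
 * top of this structure; t points at this vcpu_time_info, rdtsc() stands in
 * for the arch TSC read and scale_delta() for the conversion formula given
 * just below (neither helper is part of this interface).
 *
 *     do {
 *         ver   = t->version;
 *         rmb();
 *         tsc   = rdtsc();
 *         base  = t->system_time;
 *         stamp = t->tsc_timestamp;
 *         mul   = t->tsc_to_system_mul;
 *         shift = t->tsc_shift;
 *         rmb();
 *     } while ((ver & 1) || (ver != t->version));
 *     now = base + scale_delta(tsc - stamp, mul, shift);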
*/ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. 
If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. 
*/ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600256710ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xenoprof.h000066400000000000000000000100311314037446600260570ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. * Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). 
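 *
 * Illustrative call (hedged sketch; the HYPERVISOR_xenoprof_op() wrapper
 * takes one of the operation codes below plus an argument pointer, and the
 * structure used here is defined later in this header):
 *
 *     struct xenoprof_init init;
 *     rc = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
 *     (on success, init.num_events, init.is_primary and init.cpu_type
 *      have been filled in by Xen)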
*/ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 #define XENOPROF_last_op 15 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE ~0UL /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xsm/000077500000000000000000000000001314037446600246625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xsm/acm.h000066400000000000000000000162341314037446600256010ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* if ACM_DEBUG defined, all hooks should * print a short trace message (comment it out * when not in testing mode ) */ /* #define ACM_DEBUG */ #ifdef ACM_DEBUG # define printkd(fmt, args...) printk(fmt,## args) #else # define printkd(fmt, args...) #endif /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 4 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 #define ACMHOOK_conflictset 3 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. 
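 *
 * Illustrative pseudo-code for the bookkeeping described above when a
 * domain carrying type t starts (a sketch of the rule, not code from this
 * interface; chwall_max_types is the policy's type count):
 *
 *     for (j = 0; j < chwall_max_types; j++)
 *         if (conflict_sets[t][j])
 *             conflict_aggregate_set[j]++;
 *     running_types[t]++;
 *
 * Domain destruction performs the matching decrements.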
*/ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* size of the SHA1 hash identifying the XML policy from which the binary policy was created */ #define ACM_SHA1_HASH_SIZE 20 /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xsm/acm_ops.h000066400000000000000000000104741314037446600264620ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/interface/xsm/flask_op.h000066400000000000000000000023531314037446600266340ustar00rootroot00000000000000/* * This file contains the flask_op hypercall commands and definitions. * * Author: George Coker, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. 
*/ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 #define FLASK_LAST FLASK_MEMBER typedef struct flask_op { uint32_t cmd; uint32_t size; char *buf; } flask_op_t; DEFINE_XEN_GUEST_HANDLE(flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/pcifront.h000066400000000000000000000033141314037446600241110ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ struct pcifront_device; struct pci_bus; struct pcifront_sd { int domain; struct pcifront_device *pdev; }; static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } #if defined(CONFIG_PCI_DOMAINS) static inline int pci_domain_nr(struct pci_bus *bus) { struct pcifront_sd *sd = bus->sysdata; return sd->domain; } static inline int pci_proc_domain(struct pci_bus *bus) { return pci_domain_nr(bus); } #endif /* CONFIG_PCI_DOMAINS */ static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { } #else /* __ia64__ */ #include #include #define pcifront_sd pci_controller extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { xen_pcibios_setup_root_windows(bus, sd); } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/public/000077500000000000000000000000001314037446600233715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/public/evtchn.h000066400000000000000000000056351314037446600250420ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. 
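 *
 * Illustrative user-space usage (hedged sketch: the device node path and
 * the read/write record format follow the conventional evtchn driver, and
 * error handling is omitted):
 *
 *     int fd = open("/dev/xen/evtchn", O_RDWR);
 *     struct ioctl_evtchn_bind_virq bind = { .virq = VIRQ_DEBUG };
 *     int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
 *     unsigned int pending;
 *     read(fd, &pending, sizeof(pending));       receives a fired port
 *     write(fd, &pending, sizeof(pending));      re-enables (unmasks) it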
* * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/public/gntdev.h000066400000000000000000000107401314037446600250330ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. 
* * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. 
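 *
 * Illustrative map/unmap flow from user space (hedged sketch; error
 * handling is omitted, remote_domid and gref are placeholders, and fd is
 * an open handle on the conventional /dev/xen/gntdev node):
 *
 *     struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
 *     map.refs[0].domid = remote_domid;
 *     map.refs[0].ref   = gref;
 *     ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);
 *     void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.index);
 *
 *     struct ioctl_gntdev_get_offset_for_vaddr get =
 *         { .vaddr = (uint64_t)(unsigned long)addr };
 *     ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &get);
 *     munmap(addr, 4096);
 *     struct ioctl_gntdev_unmap_grant_ref unmap =
 *         { .index = get.offset, .count = get.count };
 *     ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);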
*/ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; /* * Sets the maximum number of grants that may mapped at once by this gntdev * instance. * * N.B. This must be called before any other ioctl is performed on the device. */ #define IOCTL_GNTDEV_SET_MAX_GRANTS \ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ uint32_t count; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/public/privcmd.h000066400000000000000000000052141314037446600252100ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. 
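 *
 * Illustrative use from privileged user space (hedged sketch:
 * __HYPERVISOR_xen_version and XENVER_version come from the public Xen
 * headers, the /proc node path is the conventional one, and error handling
 * is omitted):
 *
 *     privcmd_hypercall_t call = {
 *         .op  = __HYPERVISOR_xen_version,
 *         .arg = { XENVER_version, 0 },
 *     };
 *     int fd = open("/proc/xen/privcmd", O_RDWR);
 *     long xen_ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);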
*/ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/xen_proc.h000066400000000000000000000004021314037446600240750ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/xenbus.h000066400000000000000000000255551314037446600236040ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); /* See XBWF_ definitions below. */ unsigned long flags; }; /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 /* A xenbus device. 
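 *
 * Illustrative frontend-side use (hedged sketch: "feature-example" is a
 * made-up key and example_probe() is a placeholder; xenbus_scanf() and
 * XBT_NIL are declared later in this header):
 *
 *     static int example_probe(struct xenbus_device *dev,
 *                              const struct xenbus_device_id *id)
 *     {
 *         int val;
 *         if (xenbus_scanf(XBT_NIL, dev->otherend,
 *                          "feature-example", "%d", &val) != 1)
 *             val = 0;
 *         return 0;
 *     }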
*/ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int xenbus_register_frontend(struct xenbus_driver *drv); int xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. 
*/ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. 
On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/xencomm.h000066400000000000000000000050601314037446600237330ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. 
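 *
 * Illustrative use (hedged sketch; example_hypercall() is a placeholder for
 * whichever hypercall wrapper expects a xencomm descriptor):
 *
 *     struct xencomm_handle *desc = xencomm_map(buf, len);
 *     if (!desc)
 *         return -ENOMEM;
 *     rc = example_hypercall(desc);
 *     xencomm_free(desc);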
*/ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); #if 0 #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ struct xencomm_mini xc_desc ## _base[(n)] \ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ struct xencomm_mini* xc_desc = &xc_desc ## _base[0]; #else /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini*) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #endif #define xencomm_map_no_alloc(ptr, bytes) \ ({XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc);}) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/xencons.h000066400000000000000000000007421314037446600237440ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. */ void xencons_rx(char *buf, unsigned len, struct pt_regs *regs); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/include/xen/xenoprof.h000066400000000000000000000025651314037446600241340ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/version.ini000066400000000000000000000000641314037446600220640ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/000077500000000000000000000000001314037446600221145ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/Kbuild000066400000000000000000000002551314037446600232530ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/Makefile000066400000000000000000000000661314037446600235560ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/balloon.c000066400000000000000000000531351314037446600237150ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif #ifndef MODULE extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ /*modified by linyuliang 00176349 begin */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process,NULL); /*modified by linyuliang 00176349 end */ static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
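 * Returns NULL when no ballooned pages are available.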
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_cold_page(page); #else /* free_cold_page() is not being exported. */ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(bs.target_pages, bs.hard_limit); if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } /*modified by linyuliang 00176349 begin */ unsigned long balloon_minimum_target(void) /*modified by linyuliang 00176349 end */ { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewise linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; /*modified by linyuliang 00176349 begin */ struct page *page; /*modified by linyuliang 00176349 end */ long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { /*modified by linyuliang 00176349 begin */ /* when xen has fewer pages than nr_pages, do not give them back! */ BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator.
*/ ClearPageReserved(page); page_zone(page)->present_pages++; init_page_count(page); balloon_free_page(page); /*modified by linyuliang 00176349 end */ } /*modified by linyuliang 00176349 begin */ bs.current_pages += rc; /*modified by linyuliang 00176349 end */ /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages=totalram_pages; out: balloon_unlock(flags); /*modified by linyuliang 00176349 begin */ //return 0; return rc < 0 ? rc : rc != nr_pages; /*modified by linyuliang 00176349 end */ } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn)); page = pfn_to_page(pfn); SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages=totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /* Once the adjustment stops, even if it was interrupted, the adjustment is over;
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; // IPRINTK("b_set_new_tar_bs.target_pages= %lu",bs.target_pages); schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
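 * For example, with 4 KiB pages (PAGE_SHIFT = 12) a target of 1048576 KiB is shifted right by 12 - 10 = 2, giving 262144 pages.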
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = snprintf( page, PAGE_SIZE, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); if (bs.hard_limit != ~0UL) len += snprintf(page + len,PAGE_SIZE-len, "%8lu kB\n", PAGES2KB(bs.hard_limit)); else len += snprintf(page + len,PAGE_SIZE-len, " ??? kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; bs.hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
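 * (In the PV case, pfns from xen_start_info->nr_pages up to max_pfn were not populated with machine memory at boot, so they start out in the balloon.)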
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); if(!v) { printk(KERN_ERR "page_address is null\n"); } scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); balloon_free_page(page); goto err; } totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/common.h000066400000000000000000000044621314037446600235630ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* We may hit the hard limit in Xen. If we do then we remember it. */ unsigned long hard_limit; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. 
*/ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-balloon/sysfs.c000066400000000000000000000114231314037446600234300ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) \ static ssize_t show_##name(struct sys_device *dev, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(hard_limit_kb, (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n", (bs.hard_limit!=~0UL) ? 
PAGES2KB(bs.hard_limit) : 0); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, const char *buf, size_t count) { char memstring[64]={0}, *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strncpy(memstring, buf,count); memstring[64-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_hard_limit_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { set_kset_name(BALLOON_CLASS_NAME), }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/000077500000000000000000000000001314037446600230635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600242220ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? 
ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/Makefile000066400000000000000000000000661314037446600245250ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/evtchn.c000066400000000000000000000215031314037446600245170ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; alloc_unbound.port = 0xffff; //nothing to do ,only avoid warning irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); if(!irqflags) irqflags = 0; //nothing to do ,only avoid warning alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { 
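/* An event channel is active when its pending bit is set and its mask bit is clear. */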
return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/features.c000066400000000000000000000015441314037446600250510ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. 
*/ EXPORT_SYMBOL(xen_features); void setup_xen_features(void) { xen_feature_info_t fi; int i, j; fi.submap = 0; //add to avoid coverity warning for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return 
(nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; 
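/* The new grant entries are now on the free list; run any callbacks that were waiting for free references. */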
check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int xen_max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.status = GNTST_general_error; //avoid coverity warning query.max_nr_frames = 0; //avoid coverity warning query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = xen_max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN static DEFINE_SEQLOCK(gnttab_dma_lock); #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
*/ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } int gnttab_suspend(void) { #ifdef CONFIG_X86 apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); #endif return 0; } #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = xen_max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/machine_reboot.c000066400000000000000000000042671314037446600262160ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume(); } else xenbus_suspend_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/panic-handler.c000066400000000000000000000016521314037446600257400ustar00rootroot00000000000000#include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
*/ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/platform-compat.c000066400000000000000000000075071314037446600263450ustar00rootroot00000000000000#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if(call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0)<0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) // kill_proc(1, SIGINT, 1); /* interrupt init */ xen_reboot_vm(); #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600271410ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. 
* Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/platform-pci.c000066400000000000000000000241311314037446600256250ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base = 0, eax = 0, ebx = 0, ecx = 0, edx = 0; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0, pages = 0, msr = 0, i = 0, base = 0; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. 
* PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. * hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages = 0, msr = 0, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = 
xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret = xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/platform-pci.h000066400000000000000000000026611314037446600256360ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/reboot.c000066400000000000000000000172021314037446600245230ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Was last suspend request cancelled? */ static int suspend_cancelled; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(void *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL); static int setup_suspend_evtchn(void); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_work(&shutdown_work); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. 
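 * cmpxchg() makes the transition atomic: the shutdown/sysrq watch callbacks,
 * the suspend event-channel interrupt and xen_suspend() all manipulate
 * shutting_down concurrently, and only the caller that actually moves it out
 * of an inactive state goes on to schedule the work.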
*/ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_work(&shutdown_work); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(void *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } /* Do not clear control/shutdown here: if the suspend fails during a live migration, clearing the node makes the migration time out after the failed suspend. */ /* xenbus_write(xbt, "control", "shutdown", ""); */ err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): received shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') (void)xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; static irqreturn_t suspend_int(int irq, void* dev_id, struct pt_regs *ptregs) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); (void)snprintf(portstr,sizeof(portstr), "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } static int setup_shutdown_watcher(void) { int err; (void)xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR 
"Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xen_proc.c000066400000000000000000000010451314037446600250440ustar00rootroot00000000000000 #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry); void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xen_support.c000066400000000000000000000040531314037446600256170ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_backend_client.c000066400000000000000000000106761314037446600275520ustar00rootroot00000000000000/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_client.c000066400000000000000000000175211314037446600260770ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DPRINTK(fmt, args...) \ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. 
Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.port = 0xffff; //avoid coverity warning ; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_comms.c000066400000000000000000000140101314037446600257250ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(void *); extern int xenstored_ready; static DECLARE_WORK(probe_work, xenbus_probe, NULL); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) { if (unlikely(xenstored_ready == 0)) { xenstored_ready = 1; schedule_work(&probe_work); } wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /* Set up interrupt handler off store event channel. 
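 * xb_init_comms() also sanity-checks the two xenstore rings: a non-quiescent
 * response ring is repaired by advancing rsp_cons, a non-quiescent request
 * ring is only reported (its producer is this kernel), and the store event
 * channel is (re)bound to wake_waiting().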
*/ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_comms.h000066400000000000000000000035561314037446600257470ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_dev.c000066400000000000000000000236361314037446600254030ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
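 * Replies and watch events are appended as read_buffer entries by
 * queue_reply() and drained by xenbus_dev_read(); reply_mutex protects the
 * list and read_waitq wakes up blocked readers.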
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb = NULL; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; /* Must stay consistent with the xsd_sockmsg_type definitions in xs_wire.h. */ #define XS_LINUX_WEAK_WRITE 128 if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_TRANSACTION_START: case XS_TRANSACTION_END: case XS_DIRECTORY: case XS_READ: case XS_GET_PERMS: case XS_RELEASE: case XS_GET_DOMAIN_PATH: case XS_WRITE: case XS_LINUX_WEAK_WRITE: case XS_MKDIR: case XS_RM: case XS_SET_PERMS: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { 
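/* The reply to XS_TRANSACTION_START is the new transaction id in ASCII;
 * remember it so the transaction can still be ended if the file is closed
 * while it is open. */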
trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); BUG_ON(watch == NULL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); BUG_ON(watch->watch.node == NULL); strncpy((char *)watch->watch.node, path,strlen(path)+1); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); BUG_ON(watch->token == NULL); strncpy(watch->token, token,strlen(token)+1); watch->token[strlen(token)] = '\0'; watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: rc = -EINVAL; break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } 
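/*
 * The character device implemented above speaks the raw xenstore wire
 * protocol: each request is a struct xsd_sockmsg header immediately followed
 * by msg.len payload bytes, and each reply comes back with the same framing.
 * The sketch below is an illustrative user-space example, not part of this
 * driver: it shows how a client could read a single key through the
 * /proc/xen/xenbus node registered by xenbus_dev_init().  The xsd_sockmsg
 * layout and the XS_READ value are assumptions taken from xs_wire.h.
 */
#if 0	/* user-space sketch only; never built with this module */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

struct xsd_sockmsg { uint32_t type, req_id, tx_id, len; };	/* layout assumed from xs_wire.h */
#define XS_READ 2	/* enum xsd_sockmsg_type value, assumed from xs_wire.h */

/* Read one xenstore key through /proc/xen/xenbus (root only). */
static int xs_proc_read(const char *path, char *val, size_t val_sz)
{
	struct xsd_sockmsg msg = { .type = XS_READ, .req_id = 1, .tx_id = 0 };
	char req[1024];
	ssize_t n;
	int fd = open("/proc/xen/xenbus", O_RDWR);

	if (fd < 0)
		return -1;
	msg.len = strlen(path) + 1;			/* payload is the nul-terminated path */
	if (msg.len > sizeof(req) - sizeof(msg))
		goto err;
	memcpy(req, &msg, sizeof(msg));
	memcpy(req + sizeof(msg), path, msg.len);
	if (write(fd, req, sizeof(msg) + msg.len) < 0)	/* header + payload in one write */
		goto err;
	if (read(fd, &msg, sizeof(msg)) != sizeof(msg))	/* reply header: XS_READ or XS_ERROR */
		goto err;
	n = read(fd, val, msg.len < val_sz ? msg.len : val_sz - 1);	/* reply body */
	if (n < 0)
		goto err;
	val[n] = '\0';
	close(fd);
	return msg.type == XS_READ ? 0 : -1;
err:
	close(fd);
	return -1;
}
#endif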
UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_probe.c000066400000000000000000000710371314037446600257320ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef MODULE #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; int length = 0, i = 0; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, .dev = { .bus_id = "xen", }, }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); if (is_initial_xendomain()) return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = drv->owner; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = 
driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. 
*/ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.parent = &bus->dev; xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto unregister; return 0; unregister: device_remove_file(&xendev->dev, &dev_attr_nodename); device_remove_file(&xendev->dev, &dev_attr_devtype); device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n = 0; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ int xenstored_ready = 0; int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (xenstored_ready > 0) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(void *unused) { BUG_ON((xenstored_ready <= 0)); /* Enumerate devices in xenstore and watch for changes. 
*/ (void)xenbus_probe_devices(&xenbus_frontend); (void)register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = 0; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { xenstored_ready = 1; #ifdef CONFIG_XEN xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif } xenbus_dev_init(); /* Initialize the interface to xenstore. 
*/ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: if (page) free_page(page); /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifdef CONFIG_XEN postcore_initcall(xenbus_probe_init); MODULE_LICENSE("Dual BSD/GPL"); #else int xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_disconnected_device(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_disconnected_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_disconnected_device); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_disconnected_device(drv)) { //5s if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } //100ms schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_probe.h000066400000000000000000000061171314037446600257340ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; struct device dev; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void dev_changed(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_probe_backend.c000066400000000000000000000172621314037446600274010ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) return -ENOSPC; return 0; } static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, }, .dev = { .bus_id = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; struct xenbus_driver *drv; int i = 0; int length = 0; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_BASE_PATH=%s", xenbus_backend.root); /* terminate, set to next free slot, shrink available space */ envp[i] = NULL; envp = &envp[i]; num_envp -= i; buffer = &buffer[length]; buffer_size -= length; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, envp, num_envp, buffer, buffer_size); } return 0; } int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned 
int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-platform-pci/xenbus_xs.c000066400000000000000000000513551314037446600252560ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct rw_semaphore transaction_mutex; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) down_read(&xs_state.transaction_mutex); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) up_read(&xs_state.transaction_mutex); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len);; if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. 
*/ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len = 0; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; down_read(&xs_state.transaction_mutex); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { up_read(&xs_state.transaction_mutex); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. 
*/ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strncpy(abortstr,"F",sizeof(abortstr)); else strncpy(abortstr,"T",sizeof(abortstr)); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); up_read(&xs_state.transaction_mutex); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; (void)snprintf(token,sizeof(token), "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. 
*/ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; BUG_ON(watch->flags & XBWF_new_thread); (void)snprintf(token, sizeof(token),"%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (current->pid != xenwatch_pid) { mutex_lock(&xenwatch_mutex); mutex_unlock(&xenwatch_mutex); } } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { down_write(&xs_state.transaction_mutex); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.transaction_mutex); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { (void)snprintf(token,sizeof(token), "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); up_write(&xs_state.transaction_mutex); } static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. 
* A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { int err; struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); init_rwsem(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) return err; task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/000077500000000000000000000000001314037446600214275ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/Kbuild000066400000000000000000000001241314037446600225610ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-scsi.o xen-scsi-objs := scsifront.o xenbus.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/Makefile000066400000000000000000000000661314037446600230710ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/common.h000066400000000000000000000112021314037446600230640ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * 
of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 64 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define RING_REF_PAGE_COUNT 8 /* IOҳ */ #define VSCSIIF_RING_PAGE_SIZE (RING_REF_PAGE_COUNT * PAGE_SIZE) /* IOռڴС */ #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, VSCSIIF_RING_PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref[RING_REF_PAGE_COUNT]; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; int connected; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs); int scsifront_cmd_done(struct vscsifrnt_info *info); extern int scsi_internal_device_block(struct scsi_device *sdev); extern int scsi_internal_device_unblock(struct scsi_device *sdev); void scsifront_resume_io(struct vscsifrnt_info *info); void scsifront_resume_free(struct vscsifrnt_info *info); void scsifront_recover(struct vscsifrnt_info *info); void scsifront_xenfree(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/scsifront.c000066400000000000000000000440241314037446600236110ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" #define VSCSIIF_STATE_CONNECTED 1 /* hostѳʼresumeĹqueuecommandresetԹ */ #define VSCSIIF_STATE_SUSPENDED 2 /* ڶsuspendʱIOresumequeuecommandresetӦ */ /* next_freeΪ0x0fffһʹõģһshadow_idʼΪ0x0fffǿܲûʹ */ #define is_shadow_item_in_use(shadow_item) \ ((shadow_item.next_free == 0x0fff) && (shadow_item.req_scsi_cmnd != 0)) #define is_shadow_item_cmnd_cdb(shadow_item) (shadow_item.act == VSCSIIF_ACT_SCSI_CDB) #define is_reset_cmnd_and_completing(shadow_item) \ ((shadow_item.act == VSCSIIF_ACT_SCSI_RESET) && (shadow_item.wait_reset == 1)) static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free > VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = 0x0fff; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].next_free = info->shadow_free; info->shadow[id].req_scsi_cmnd = 0; info->shadow_free = id; spin_unlock_irqrestore(&info->shadow_lock, flags); } static struct vscsiif_request* io_ring_get_request(struct vscsiif_front_ring *ring) { vscsiif_request_t *ring_req; ring_req = RING_GET_REQUEST(ring, ring->req_prod_pvt); ring->req_prod_pvt++; return ring_req; } static struct vscsiif_request* scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = io_ring_get_request(ring); id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id) { int i; if (s->sc_data_direction == DMA_NONE) return; if (s->nr_segments) { for (i = 0; i < s->nr_segments; i++) { if (unlikely(gnttab_query_foreign_access( s->gref[i]) != 0)) { printk(KERN_ALERT "scsifront: " "grant still in use by backend.\n"); BUG(); } gnttab_end_foreign_access(s->gref[i], 0UL); } } return; } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd; if (sc == NULL) BUG(); scsifront_gnttab_done(&info->shadow[id], id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; sc->resid = ring_res->residual_len; if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; 
info->shadow[id].rslt_reset = ring_res->rslt; spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } int scsifront_cmd_done(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; unsigned long flags; spin_lock_irqsave(&info->io_lock, flags); /* ״̬Ϊconnectedresume³ʼIO˴ʱܷIO */ if (unlikely(VSCSIIF_STATE_CONNECTED != info->connected)) { more_to_do = 1; /* ˴δɵresumeʱط */ goto out; } rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); } else { info->ring.sring->rsp_event = i + 1; } out: spin_unlock_irqrestore(&info->io_lock, flags); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } void scsifront_resume_io(struct vscsifrnt_info *info) { unsigned long flags; spin_lock_irqsave(info->host->host_lock, flags); /* scsifront_recover()еif (info->connected == VSCSIIF_STATE_CONNECTED)һ */ info->connected = VSCSIIF_STATE_CONNECTED; scsifront_do_request(info); spin_unlock_irqrestore(info->host->host_lock, flags); } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id, int is_first_time_map) { grant_ref_t gref_head; struct page *page; int err, i, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); int nr_pages, off, len, bytes; unsigned long buffer_pfn; unsigned int data_len = 0; if (sc->sc_data_direction == DMA_NONE) return 0; if (is_first_time_map) { err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head); if (err) { printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n"); return -ENOMEM; } } if (sc->use_sg) { /* quoted scsi_lib.c/scsi_req_map_sg . 
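* Each scatterlist element is granted to the backend page by page: a grant
* reference is claimed for every page touched (or, on a resume re-map, the
* reference already saved in the shadow entry is reused), and a
* (gref, offset, length) triple is written into ring_req->seg[].
* Illustrative numbers, assuming 4 KiB pages: an element with offset 3072
* and length 6144 is split into three segments of 1024, 4096 and 1024
* bytes, consuming three grant references.  At most VSCSIIF_SG_TABLESIZE
* segments fit into one request; anything larger fails with -E2BIG below.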
*/ struct scatterlist *sg = (struct scatterlist *)sc->request_buffer; nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } for (i = 0; i < sc->use_sg; i++) { page = sg[i].page; off = sg[i].offset; len = sg[i].length; data_len += len; buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; while (len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); if (is_first_time_map) { ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); info->shadow[id].gref[ref_cnt] = ref; } else { ref = info->shadow[id].gref[ref_cnt]; } gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id, buffer_pfn, write); ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; ring_req->seg[ref_cnt].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; off = 0; ref_cnt++; } } } else if (sc->request_bufflen) { unsigned long end = ((unsigned long)sc->request_buffer + sc->request_bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = (unsigned long)sc->request_buffer >> PAGE_SHIFT; page = virt_to_page(sc->request_buffer); nr_pages = end - start; len = sc->request_bufflen; if (nr_pages > VSCSIIF_SG_TABLESIZE) { ref_cnt = (-E2BIG); goto big_to_sg; } buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; off = offset_in_page((unsigned long)sc->request_buffer); for (i = 0; i < nr_pages; i++) { bytes = PAGE_SIZE - off; if (bytes > len) bytes = len; if (is_first_time_map) { ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); info->shadow[id].gref[i] = ref; } else { ref = info->shadow[id].gref[i]; } gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id, buffer_pfn, write); ring_req->seg[i].gref = ref; ring_req->seg[i].offset = (uint16_t)off; ring_req->seg[i].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; off = 0; ref_cnt++; } } big_to_sg: if (is_first_time_map) { gnttab_free_grant_references(gref_head); } return ref_cnt; } static void scsifront_device_block_io(struct Scsi_Host *host) { struct scsi_device *sdev; shost_for_each_device(sdev, host) scsi_internal_device_block(sdev); } static void scsifront_unblock_io_device(struct Scsi_Host *host) { struct scsi_device *sdev; shost_for_each_device(sdev, host) scsi_internal_device_unblock(sdev); } static void scsifront_fill_request(vscsiif_request_t *ring_req, struct scsi_cmnd *sc) { ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if (sc->cmd_len) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->timeout_per_command / HZ); } void scsifront_resume_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); info->connected = VSCSIIF_STATE_SUSPENDED; spin_unlock_irqrestore(host->host_lock, flags); /* kthread(scsifront_schedule->scsifront_cmd_done)ͬһ */ spin_lock_irqsave(&info->io_lock, flags); spin_unlock_irqrestore(&info->io_lock, flags); /* Prevent new requests being issued until we fix things up. 
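* The resume path these helpers implement (driven by scsifront_resume() in
* xenbus.c) runs, in order:
*
*     scsifront_resume_free(info);   -- mark SUSPENDED, block I/O, drop ring
*     scsifront_init_ring(info);     -- new shared ring, grants, event channel
*     scsifront_recover(info);       -- re-issue the outstanding shadow entries
*
* so commands that were in flight across a save/restore are resent rather
* than failed back to the SCSI midlayer.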
*/ scsifront_device_block_io(host); scsi_block_requests(host); scsifront_xenfree(info); } static void scsifront_resend_queued(struct vscsifrnt_info *info, int shadow_id) { vscsiif_request_t *ring_req; struct scsi_cmnd *sc; int ref_cnt; /* Grab a request slot and copy shadow state into it. */ ring_req = io_ring_get_request(&(info->ring)); ring_req->rqid = (uint16_t)shadow_id; /* ֱpending requestshadow id */ sc = (struct scsi_cmnd *)info->shadow[shadow_id].req_scsi_cmnd; scsifront_fill_request(ring_req, sc); ring_req->act = VSCSIIF_ACT_SCSI_CDB; /* * Rewrite any grant references invalidated by suspend/resume. * Ӧʧܣʧqueue_commandʱӦʧܡref_cntһЧֵ */ ref_cnt = map_data_for_request(info, sc, ring_req, shadow_id, 0); if (info->shadow[shadow_id].nr_segments != ref_cnt) { printk(KERN_ERR "Error: resend queued: nr_segments[%u] ref_cnt[%d]\n", info->shadow[shadow_id].nr_segments, ref_cnt); } ring_req->nr_segments = (uint8_t)ref_cnt; } static void scsifront_resend_reset(struct vscsifrnt_info *info, int reset_shadow_id) { vscsiif_request_t *ring_req; struct scsi_cmnd *sc; /* Grab a request slot and copy shadow state into it. */ ring_req = io_ring_get_request(&(info->ring)); ring_req->rqid = (uint16_t)reset_shadow_id; /* ֱpending requestshadow id */ sc = (struct scsi_cmnd *)info->shadow[reset_shadow_id].req_scsi_cmnd; scsifront_fill_request(ring_req, sc); ring_req->act = VSCSIIF_ACT_SCSI_RESET; ring_req->nr_segments = (uint8_t)0; } void scsifront_recover(struct vscsifrnt_info *info) { int i; unsigned long flags; struct xenbus_device *dev = info->dev; int reset_shadow_id = -1; xenbus_switch_state(dev, XenbusStateInitialised); spin_lock_irqsave(info->host->host_lock, flags); /* Find pending requests and requeue them. */ /* * shadowget_idadd_idʹöhost_lockУûhost_lockеscsifront_cdb_cmd_done()io_lockУ * scsifront_resume_free()Ѻio_lockͬһΣkthreadеscsifront_cdb_cmd_done()ʱִ */ for (i = 0; i < VSCSIIF_MAX_REQS; i++) { /* resetɣqueue commandҪϲգҪ· */ if (is_shadow_item_in_use(info->shadow[i]) && is_reset_cmnd_and_completing(info->shadow[i])) { goto out; } } for (i = 0; i < VSCSIIF_MAX_REQS; i++) { /* Not in use? */ if (!is_shadow_item_in_use(info->shadow[i])) { continue; } if (is_shadow_item_cmnd_cdb(info->shadow[i])) { scsifront_resend_queued(info, i); } else { if (reset_shadow_id != -1) { printk(KERN_ERR "Error: recover: more than one reset! info[%p]\n", info); } reset_shadow_id = i; } } if (reset_shadow_id != -1) { scsifront_resend_reset(info, reset_shadow_id); } out: /* Now safe for us to use the shared ring */ /* scsifront_backend_changed()еcase XenbusStateConnectedһ */ spin_unlock_irqrestore(info->host->host_lock, flags); scsi_unblock_requests(info->host); scsifront_unblock_io_device(info->host); } static int scsifront_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; int ref_cnt; uint16_t rqid; if (unlikely(info->connected != VSCSIIF_STATE_CONNECTED)) goto out_host_busy; if (RING_FULL(&info->ring)) { goto out_host_busy; } sc->scsi_done = done; sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; scsifront_fill_request(ring_req, sc); info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].act = ring_req->act; ref_cnt = map_data_for_request(info, sc, ring_req, rqid, 1); if (ref_cnt < 0) { printk(KERN_ERR "scsifront_queuecommand map failed! 
ref_cnt[%d]\n", ref_cnt); add_id_to_freelist(info, rqid); if (ref_cnt == (-ENOMEM)) goto out_host_busy; else { sc->result = (DID_ERROR << 16); goto out_fail_command; } } ring_req->nr_segments = (uint8_t)ref_cnt; info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); return 0; out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(sc); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; uint16_t rqid; int err; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif retry: if (unlikely(info->connected != VSCSIIF_STATE_CONNECTED)) { spin_unlock_irq(host->host_lock); ssleep(1); spin_lock_irq(host->host_lock); goto retry; } /* µĿԴѼ˴Իļ⣬޸Ĵ϶࣬pvscsiδ */ if (RING_FULL(&info->ring)) printk(KERN_ERR "Error: reset: fing full! info[%p] sc[%p]!\n", info, sc); ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; scsifront_fill_request(ring_req, sc); ring_req->nr_segments = 0; info->shadow[rqid].nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; printk(KERN_INFO "scsifront_init.\n"); err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { printk(KERN_INFO "scsifront_exit.\n"); scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-scsi/xenbus.c000066400000000000000000000273701314037446600231100ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of 
the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif #define ADD_LUN_RETRY_NUM 3 extern struct scsi_host_template scsifront_sht; void scsifront_xenfree(struct vscsifrnt_info *info) { int i; for (i = 0; i < RING_REF_PAGE_COUNT; i++) { if (info->ring_ref[i] != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref[i], 0UL); info->ring_ref[i] = GRANT_INVALID_REF; } } free_pages((unsigned long)info->ring.sring, get_order(VSCSIIF_RING_PAGE_SIZE)); info->ring.sring = NULL; if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } scsifront_xenfree(info); scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; int i; for (i = 0; i < RING_REF_PAGE_COUNT; i++) info->ring_ref[i] = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_pages(GFP_KERNEL, get_order(VSCSIIF_RING_PAGE_SIZE)); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, VSCSIIF_RING_PAGE_SIZE); for (i = 0; i < RING_REF_PAGE_COUNT; i++) { err = xenbus_grant_ring(dev, virt_to_mfn((unsigned long)sring + i * PAGE_SIZE)); if (err < 0) { info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref[i] = err; } err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, SA_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; int i; char str[16]; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } for (i = 0; i < RING_REF_PAGE_COUNT; i++) { snprintf(str, sizeof(str), "ring-ref-%d", i + 1); err = xenbus_printf(xbt, dev->nodename, str, "%u", info->ring_ref[i]); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); goto fail; } } err = xenbus_printf(xbt, dev->nodename, "event-channel", 
"%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_resume(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); int err = -ENOMEM; if (gnttab_query_foreign_access(info->ring_ref[0])) { return 0; } /* * scsifront_resume_free()scsifront_init_ring()л³ʼIO/Ȩ * scsifront_recover()лѵǰshadowoutstandingIOطһ飬 * ׶λֱhost; Ϊˣqueuecommandreset_handlerkthreadУҪע׶ͬ */ scsifront_resume_free(info); err = scsifront_init_ring(info); if (!err) { scsifront_recover(info); return err; } return 0; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
*/ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; int count = 0; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); #if 0 (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); #else /* ߻Ѻ豸Ѵں */ (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); #endif } else { add_lun_retry: ++count; scsi_add_device(info->host, chn, tgt, lun); sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_INFO "scsifront: scsi_add_device ok.\n"); scsi_device_put(sdev); (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } else { if (count > ADD_LUN_RETRY_NUM) { break; } printk(KERN_ERR "scsifront: scsi_add_device failed.\n"); goto add_lun_retry; } } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: scsifront_resume_io(info); if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .owner = THIS_MODULE, .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, .resume = scsifront_resume, .otherend_changed = 
scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/000077500000000000000000000000001314037446600212415ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-194/000077500000000000000000000000001314037446600223105ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-194/blkfront.c000066400000000000000000000726741314037446600243150ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static void blkif_restart_queue(void *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; int max_segment; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; max_segment = BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_MAX_IOREQ_PER_REQUEST; //BLKIF_MAX_IOREQ_PER_REQUEST info->sg = kzalloc( max_segment * sizeof(struct scatterlist),GFP_KERNEL); if (NULL == info->sg) { kfree(info); xenbus_dev_fatal(dev, -ENOMEM, "allocating info->sg structure"); return -ENOMEM; } INIT_WORK(&info->work, blkif_restart_queue, (void *)info); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. 
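* The last path component of the frontend node name is that number, e.g.
* (illustrative) dev->nodename == "device/vbd/51712" yields handle 51712;
* simple_strtoul() below simply parses everything after the final '/'.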
*/ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info->sg); kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); printk("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); printk("suo info->connected: %d\n", info->connected); err = talk_to_backend(dev, info); printk("err:%d info->connected: %d\n",err, info->connected); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); printk("resume end info->connected: %d\n", info->connected); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; printk("start talk_to_backend\n"); /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); printk("err:%d\n",err); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; int max_segment; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } //SHARED_RING_INIT(sring); max_segment = BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_MAX_IOREQ_PER_REQUEST; info->sg = kzalloc( max_segment * sizeof(struct scatterlist),GFP_KERNEL); if (NULL == info->sg) { kfree(sring); xenbus_dev_fatal(dev, -ENOMEM, "allocating info->sg structure"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); memset(info->sg, 0, sizeof(info->sg)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 
0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev->dev.driver_data; if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: //blkfront_closing(info->xbdev); kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; struct block_device *bdev; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. 
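* A capacity change is signalled by the backend rewriting the "sectors" key
* under its own xenstore directory and re-announcing XenbusStateConnected;
* the re-read below plus set_capacity()/check_disk_size_change() is what
* makes an online disk extension visible to the guest.  Illustrative layout
* (conventional xenstore paths, not taken from this file):
*
*     backend/vbd/<domid>/<handle>/sectors     = "41943040"
*     backend/vbd/<domid>/<handle>/sector-size = "512"
*
* i.e. 41943040 sectors of 512 bytes = 20 GiB.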
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); /* for disk extend */ bdev=bdget_disk(info->gd,0); mutex_lock(&bdev->bd_mutex); check_disk_size_change(info->gd, bdev); mutex_unlock(&bdev->bd_mutex); bdput(bdev); /* fall through */ case BLKIF_STATE_SUSPENDED: if(info->connected == BLKIF_STATE_CONNECTED) printk(KERN_INFO "unplug timeout,backend has changed to 4 again,connect return\n"); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info == NULL) { printk(KERN_ERR "info is null!\n"); return; } if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request().
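* Note that the queue is being restarted at this point: blkfront_closing()
* stopped it earlier, waited until every slot of the shared ring was free
* again (RING_FREE_REQUESTS() == RING_SIZE() above), and only now restarts
* it before removing the disk, so no in-flight request is lost on a
* hot-unplug.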
*/ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); xlvbd_del(info); out: if (dev) xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free > BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; if(!dev) { printk(KERN_ERR "blkif_release:dev is NULL\n"); return 0; } enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready && !strcmp(info->gd->disk_name, "xvda")){ blkfront_closing(dev); } } #endif return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg, *blk_sg; int nr_segment; int gnt_ref_cnt; unsigned long *ioreq_cnt; blkif_sector_t sector_cnt; blkif_sector_t start_sector; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; ioreq_cnt = kzalloc(sizeof(unsigned long), GFP_ATOMIC); if (NULL == ioreq_cnt) return 1; nr_segment = blk_rq_map_sg(req->q, req, info->sg); if (BLKIF_MAX_SEGMENTS_PER_REQUEST >= nr_segment) { *ioreq_cnt = 1; } else { *ioreq_cnt = (nr_segment + BLKIF_MAX_SEGMENTS_PER_REQUEST -1)/ BLKIF_MAX_SEGMENTS_PER_REQUEST ; } if (*ioreq_cnt > RING_FREE_REQUESTS(&info->ring)) { kfree(ioreq_cnt); return 1; } gnt_ref_cnt = nr_segment ; if (0 < gnt_ref_cnt) { if (gnttab_alloc_grant_references( gnt_ref_cnt, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, gnt_ref_cnt); kfree(ioreq_cnt); return 1; } } blk_sg = info->sg; start_sector = (blkif_sector_t)req->sector; do { /* Fill out a communications ring structure. 
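* One block-layer request may turn into several ring requests here: the
* scatterlist built above is consumed in chunks of
* BLKIF_MAX_SEGMENTS_PER_REQUEST, and ioreq_cnt (shared by every chunk
* through shadow[id].count) tells the completion path when the whole
* request has finished.  Worked example, assuming the usual limit of 11
* segments per ring request: nr_segment = 30 gives
* ioreq_cnt = (30 + 11 - 1) / 11 = 3 ring requests carrying 11, 11 and 8
* segments.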
*/ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; info->shadow[id].count = ioreq_cnt; if (BLKIF_MAX_SEGMENTS_PER_REQUEST < nr_segment) { ring_req->nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; } else { ring_req->nr_segments = nr_segment; } ring_req->id = id; ring_req->sector_number = start_sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST*BLKIF_MAX_IOREQ_PER_REQUEST); sector_cnt = 0; for (i = 0; i < ring_req->nr_segments; ++i) { sg = blk_sg + i; buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; sector_cnt += (sg->length >> 9); /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } start_sector += sector_cnt; info->ring.req_prod_pvt++; blk_sg += BLKIF_MAX_SEGMENTS_PER_REQUEST; nr_segment -= BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; } while(0 rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } nr_segment = (req->nr_phys_segments+ BLKIF_MAX_SEGMENTS_PER_REQUEST -1) / BLKIF_MAX_SEGMENTS_PER_REQUEST; if (nr_segment > RING_FREE_REQUESTS(&info->ring)) { goto wait; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) //blkif_interrupt { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; unsigned long *count; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
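* The barrier pairs with the backend's update of rsp_prod: rsp_prod is read
* once into 'rp', rmb() orders that read before the reads of the response
* bodies, and the loop below consumes entries rsp_cons .. rp - 1.  Without
* the barrier the CPU could observe the new rsp_prod yet still read stale
* response contents.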
*/ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); count = info->shadow[id].count; info->shadow[id].count = NULL; ADD_ID_TO_FREELIST(info, id); --(*count); if (0 == *count) { kfree(count); } else { continue; } uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; kfree (info->sg); info->sg = NULL; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume.
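* After a save/restore or migration the old grant-table entries are gone,
* so every data page of a copied request has to be granted again to the
* (possibly new) backend domain before the request is pushed onto the
* fresh ring; the pfn recorded in shadow[].frame[] at submission time is
* what makes that possible.  Once all pending requests are requeued, the
* state flips back to BLKIF_STATE_CONNECTED and flush_requests() kicks the
* ring.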
*/ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ? GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-194/block.h000066400000000000000000000115521314037446600235570ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif #define BLKIF_MAX_IOREQ_PER_REQUEST 16 struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long *count; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; //struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct scatterlist *sg; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-194/vbd.c000066400000000000000000000276541314037446600232450ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... 
SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); //blk_queue_max_sectors(rq, 512); blk_queue_max_sectors(rq, BLK_DEF_MAX_SECTORS); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ //blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST*BLKIF_MAX_IOREQ_PER_REQUEST); //blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_MAX_IOREQ_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int 
vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-8/000077500000000000000000000000001314037446600221425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-8/blkfront.c000066400000000000000000000657051314037446600241440ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static void blkif_restart_queue(void *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err = 0, vdevice = 0, i = 0; struct blkfront_info *info; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. 
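 * In a PV-on-HVM guest the virtual CD-ROM is already presented through the
 * emulated IDE controller and handled by the native driver; claiming the
 * matching paravirtual vbd as well would attach two drivers to one medium.
 * The backend's device-type node is therefore checked and "cdrom" devices
 * are skipped with -ENXIO.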
*/ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue, (void *)info); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
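 * setup_blkring() below allocates one page for the shared request/response
 * ring, grants it to the backend domain and binds an event channel for
 * notifications.  The resulting ring-ref, event-channel port and ring
 * protocol are then published under the frontend's xenstore directory in a
 * single transaction (restarted on -EAGAIN) before the device switches to
 * the Initialised state.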
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); memset(info->sg, 0, sizeof(info->sg)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev->dev.driver_data; if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. 
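 * Only two transitions matter here: when the backend reports Connected the
 * frontend reads the disk parameters and finishes bringing the gendisk up
 * via connect(); when the backend starts Closing, the unplug work is pushed
 * to its own kernel thread (xenwatch_unplugdisk_callback) so the shared
 * xenwatch thread is not blocked while waiting for the last opener of the
 * device to go away.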
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; struct block_device *bdev; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); /* for disk extend*/ #ifdef DISK_EX_FEATURE bdev=bdget_disk(info->gd,0); mutex_lock(&bdev->bd_mutex); check_disk_size_change(info->gd, bdev); mutex_unlock(&bdev->bd_mutex); bdput(bdev); #endif /* fall through */ case BLKIF_STATE_SUSPENDED: if(info->connected == BLKIF_STATE_CONNECTED) printk(KERN_INFO "unplug timeout,backend has changed to 4 again,connect return\n"); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; if (info == NULL) { printk(KERN_ERR "info is null!\n"); return; } if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. 
Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); xlvbd_del(info); out: if (dev) xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; if(!dev) { printk(KERN_ERR "blkif_release:dev is NULL\n"); return 0; } enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready && !strcmp(info->gd->disk_name, "xvda")){ blkfront_closing(dev); } } #endif return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)req->sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); for (i = 0; i < ring_req->nr_segments; ++i) { sg = info->sg + i; buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. 
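 * One grant reference is claimed from the batch reserved above for each
 * scatterlist segment and pointed at the segment's machine frame.  For a
 * write request the backend only needs to read the guest's data, so the
 * grant is made read-only (GTF_readonly); for a read it must stay writable.
 * The pfn is also remembered in shadow[].frame[] so the grant can be
 * re-issued after a suspend/resume.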
*/ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. 
*/ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if(NULL == copy) return; memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ? GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-8/block.h000066400000000000000000000114171314037446600234110ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. 
* * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. 
*/ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/2.6.18-8/vbd.c000066400000000000000000000274721314037446600230750ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c", mi->type->diskname, 'a' + offset ); } else { snprintf(gd->disk_name,sizeof(gd->disk_name), "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? 
"enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return snprintf(buf, sizeof("cdrom\n"),"cdrom\n"); return snprintf(buf, sizeof("disk\n"),"disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/Kbuild000066400000000000000000000001151314037446600223730ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/Makefile000066400000000000000000000000661314037446600227030ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/other/000077500000000000000000000000001314037446600223625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/other/blkfront.c000066400000000000000000001440701314037446600243550ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 #define UNPLUG_DISK_TMOUT 60 //Patchset to backport 2.6.24-rc1 kernel base //http://lists.openfabrics.org/pipermail/ewg/2007-October/004846.html #define sg_page(x) (x)->page static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static void blkif_restart_queue(void *arg); static int blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *, struct blkfront_info *, struct blkif_response *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err = 0, vdevice = 0; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. 
*/ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; /* for persistent grant */ info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue, (void *)info); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); /*info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL); if (info->sg == NULL) goto out_of_memory; memset(info->sg, 0, sizeof(info->sg));*/ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = 
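/* Each shadow entry (one per possible in-flight request) gets its own grants_used/sg/indirect_grants arrays below, each sized for the negotiated maximum segment count. */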
kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ //sg_init_table(info->shadow[i].sg, segs); memset(info->shadow[i].sg, 0, sizeof(info->shadow[i].sg[0]) * segs); } return 0; out_of_memory: //kfree(info->sg); //info->sg = NULL; for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. * This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio,split_bio->bio->bi_size, split_bio->err); kfree(split_bio); } bio_put(bio); } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); 
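/* Share this frame with the backend domain: the final argument (0, i.e. not read-only) leaves the grant writable, so the backend can both read from and fill the page while servicing I/O. */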
gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) printk("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) printk("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U << ring_order, ring_order); info->ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel.
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) { goto abort_transaction; } } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) { goto abort_transaction; } what = "protocol"; err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { goto abort_transaction; } /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; int i; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; /*info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM);*/ info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? 
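/* Only if every ring page was allocated do we map them into one contiguous kernel virtual range; a partial allocation falls through to the cleanup below. */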
vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); for (i=0; i<info->ring_size; i++) { err = xenbus_grant_ring(dev, pfn_to_mfn(page_to_pfn(info->ring_pages[i]))); if (err < 0) goto fail; info->ring_refs[i] = err; } err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev->dev.driver_data; if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is not allowed to be unplugged!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finished unmounting, info->users has changed to 0!\n"); } bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: //blkfront_closing(info->xbdev); kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told us * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; struct block_device *bdev; /* for persistent grant */ int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity.
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); /* for disk extend */ #ifdef DISK_EX_FEATURE bdev = bdget_disk(info->gd,0); mutex_lock(&bdev->bd_mutex); check_disk_size_change(info->gd, bdev); mutex_unlock(&bdev->bd_mutex); bdput(bdev); #endif return; case BLKIF_STATE_SUSPENDED: if(info->connected == BLKIF_STATE_CONNECTED) printk(KERN_INFO "unplug timeout,backend has changed to 4 again,connect return\n"); blkif_recover(info); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk("%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; if (info == NULL) { printk(KERN_ERR "info is null!\n"); return; } if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* Let the queue run again so it can drain before removal.
*/ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); xlvbd_del(info); out: if (dev) xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; if(!dev) { printk(KERN_ERR "blkif_release:dev is NULL\n"); return 0; } enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready && !strcmp(info->gd->disk_name, "xvda")){ blkfront_closing(dev); } } #endif return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; struct blkif_request_segment_aligned *segments = NULL; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ bool new_persistent_gnts; grant_ref_t gref_head; struct scatterlist *sg; struct grant *gnt_list_entry = NULL; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enough grants to allocate a request */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure.
*/ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->flags & (REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)req->sector; ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)req->sector; ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->u.rw.nr_segments = nseg; } for (i = 0; i < nseg; ++i) { sg = info->shadow[id].sg + i; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_USER0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg->page), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. 
It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } if (segments) kunmap_atomic(segments, KM_USER0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
*/ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: /* end_that_request_first() returns a flag indicating whether all sectors of the request have been transferred; a return value of 0 means the whole request has completed, after which the request can be removed from the queue with blkdev_dequeue_request() and handed to end_that_request_last(). void end_that_request_last(struct request *req); end_that_request_last() notifies whoever is waiting for the request to complete and releases the request structure. */ if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
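/* An indirect request records its segment count in u.indirect, an ordinary request in u.rw. */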
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; for (i = 0; i < info->ring_size; i++) { if (info->ring_refs[i] != GRANT_INVALID_REF) { /*gnttab_end_foreign_access(info->ring_refs[i], (unsigned long)mfn_to_virt(pfn_to_mfn(page_to_pfn(info->ring_pages[i]))));*/ gnttab_end_foreign_access(info->ring_refs[i],0UL); if(info->ring_pages[i]) { __free_page(info->ring_pages[i]); info->ring_pages[i] = NULL; } info->ring_refs[i] = GRANT_INVALID_REF; } } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ //for_each_sg(s->sg, sg, nseg, i) { for (i = 0; i < nseg; ++i) { sg = s->sg + i; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. 
*/ if (!info->feature_persistent) printk(KERN_WARNING "backend has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backend has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static int blkif_recover(struct blkfront_info *info) { int i; // struct blkif_request *req; struct request *req, *n; struct blk_shadow *copy; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int segs, offset; int pending, size; struct split_bio *split_bio; struct list_head requests; //printk("[INTO]----->blkif_recover\n"); /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); if (!copy) return -ENOMEM; memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { kfree(copy); return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_phys_segments(info->rq, segs); blk_queue_max_hw_segments(info->rq, segs); bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < RING_SIZE(&info->ring); i++) { /* Not in use? */ if (!copy[i].request) continue; #if 0 /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->u.rw.id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume.
*/ for (j = 0; j < req->u.rw.nr_segments; j++) gnttab_grant_foreign_access_ref( req->u.rw.seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->u.rw.id].grants_used[j]->pfn), rq_data_dir( (struct request *) info->shadow[req->u.rw.id].request)); info->shadow[req->u.rw.id].req = *req; info->ring.req_prod_pvt++; #endif merge_bio.head = copy[i].request->bio; merge_bio.tail = copy[i].request->biotail; bio_list_merge(&bio_list, &merge_bio); copy[i].request->bio = NULL; blk_put_request(copy[i].request); } kfree(copy); /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. */ spin_lock_irq(&blkif_io_lock); //TODO while ((req = blk_fetch_request(info->rq)) != NULL) { while ((req = elv_next_request(info->rq)) != NULL) { //printk("[blkif_recover] while req:%p\n", req); blkdev_dequeue_request(req); merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->flags & (REQ_FUA)) printk("diskcache flush request found!\n"); __blk_put_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ //flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. 
*/ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/other/block.h000066400000000000000000000141571314037446600236350ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) 
((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 256 (1M))"); //#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; /* move sg to blk_shadow struct */ //struct scatterlist *sg; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned long shadow_free; int feature_barrier; unsigned int max_indirect_segments; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
*/ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vbd/other/vbd.c000066400000000000000000000304111314037446600233000ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, unsigned int segments) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, segments); blk_queue_max_hw_segments(rq, segments); printk("init queue set max_hw_sectors %u max_segments %u\n", segments*8, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c", mi->type->diskname, 'a' + offset ); } else { snprintf(gd->disk_name,sizeof(gd->disk_name), "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { snprintf(gd->disk_name, sizeof(gd->disk_name),"%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); printk("Set blk queue %d\n", info->max_indirect_segments ? :BLKIF_MAX_SEGMENTS_PER_REQUEST); if (xlvbd_init_blk_queue(gd, sector_size, info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? 
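/* QUEUE_ORDERED_DRAIN asks the block layer to drain outstanding requests before issuing a barrier; QUEUE_ORDERED_NONE advertises that ordered writes are not supported. */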
QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: %s %s %s %s %s %s\n", info->gd->disk_name, "barriers", info->feature_barrier ? "enabled;" : "disabled;", "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return snprintf(buf, sizeof("cdrom\n"),"cdrom\n"); return snprintf(buf, sizeof("disk\n"),"disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/000077500000000000000000000000001314037446600214305ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/Kbuild000066400000000000000000000001411314037446600225610ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/Makefile000066400000000000000000000000661314037446600230720ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/accel.c000066400000000000000000000534361314037446600226560ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len = 0, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char 
**vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. 
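 *
 * The structure allocated by this function stays on accelerators_list for
 * the whole lifetime of netfront (it is only freed in netif_exit_accel()),
 * even if the plugin module itself goes away again, so that a vif can
 * re-attach when the plugin is reloaded.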
*/ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ netif_poll_disable(vif_state->np->netdev); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); netif_poll_enable(vif_state->np->netdev); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. 
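 *
 * For reference, an accelerator plugin "connects to netfront" by calling
 * netfront_accelerator_loaded() from its module init and
 * netfront_accelerator_stop() from its module exit.  The fragment below is
 * an illustrative sketch only and is not part of this driver: the plugin
 * name "example_accel" is made up, and the hook names are simply the ones
 * used by the call sites in this file (their exact prototypes live in
 * netfront.h), so treat every detail as an assumption.
 */
#if 0	/* illustrative sketch only -- never compiled */
static struct netfront_accel_hooks example_hooks = {
	.new_device	= example_new_device,	/* attach to one vif */
	.netdev_poll	= example_netdev_poll,	/* accelerated rx poll */
	.start_xmit	= example_start_xmit,	/* accelerated tx fast path */
	.start_napi_irq	= example_start_napi_irq,
	.stop_napi_irq	= example_stop_napi_irq,
	.check_ready	= example_check_ready,
	.get_stats	= example_get_stats,
	.remove		= example_remove,
};

static int __init example_accel_init(void)
{
	/* Netfront will call new_device() for every vif whose xenstore
	 * accel-frontend key names this plugin. */
	return netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
					   "example_accel", &example_hooks);
}

static void __exit example_accel_exit(void)
{
	/* Blocks until no vif is still using the hooks. */
	netfront_accelerator_stop("example_accel");
}
module_init(example_accel_init);
module_exit(example_accel_exit);
#endif
/* The function documented by the comment block above follows.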
*/ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if ((NULL != hooks) && (hooks->new_device(np->netdev, vif_state->dev) == 0)) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. */ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ netif_poll_disable(vif_state->np->netdev); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); netif_poll_enable(vif_state->np->netdev); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. 
*/ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. */ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. 
*/ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. 
*/ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/netfront.c000066400000000000000000001670731314037446600234510ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_X86_PAE unsigned long long __supported_pte_mask = ~(_PAGE_NX ); #endif struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. 
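   Three cases follow: kernels with full GSO support, kernels that only
   have NETIF_F_TSO, and kernels with neither; in the latter two the
   missing helpers such as skb_is_gso() and netif_needs_gso() are provided
   locally so the rest of the driver can use a single code path.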
*/ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_HW)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include #include static void remove_nic8139() { struct pci_dev *pci_dev_nic8139 = NULL; //do { pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* check kernel version */ } //} while (pci_dev_nic8139); # comment off because fedora12 will crash return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
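 *
 * netfront_probe() first calls remove_nic8139() (defined above) to unplug
 * the emulated RTL8139 PCI NIC (vendor 0x10ec, device 0x8139) that the
 * platform presents for compatibility, so the guest is not left with a
 * duplicate, emulated copy of the same network connection.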
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; // remove 8139 nic when xen nic devices appear remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
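   setup_device() below allocates and grants the tx/rx shared rings to the
   backend and binds an event channel; the grant references and event-channel
   port obtained there are what the xenstore transaction further down
   publishes (tx-ring-ref, rx-ring-ref, event-channel), so the backend can
   map the rings and raise interrupts.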
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
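 *
 * In the normal bring-up the backend first enters InitWait; the frontend
 * then connects the rings via network_connect() and switches its own state
 * to Connected.  When the backend itself reports Connected, gratuitous ARPs
 * are sent so switches and peers learn the MAC quickly, and Closing leads
 * to an orderly shutdown via xenbus_frontend_closed().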
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); } } spin_unlock_bh(&np->rx_lock); network_maybe_wake_tx(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
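   ('rp' here is the response producer index that has just been read into
   'prod'; the read barrier makes sure the response entries consumed in the
   loop below are not read before that index.)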
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } (void)gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
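   The adjustment is exponential: whenever fewer than a quarter of the
   current target worth of requests is still outstanding on the ring,
   rx_target is doubled, capped at rx_max_target.  The matching linear
   decrease happens in netif_poll().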
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
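   (In the page-flip case the hypercall sequence above provides the needed
   ordering; in the copy case the wmb() in the else branch does, before the
   request producer index is published below.)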
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return 0; } frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); dev->last_rx = jiffies; } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) 
WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct net_device *dev, int *pbudget) { struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, budget, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); if ((budget = *pbudget) > dev->quota) budget = dev->quota; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } if ((skb = __skb_dequeue(&tmpq)) == NULL) WPRINTK("skb is NULL.\n"); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. 
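 * (NETRXF_csum_blank means the packet came from another local domain and
 * still needs its checksum filled in, which implies the payload itself can
 * be trusted, so either flag allows CHECKSUM_UNNECESSARY here.)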
*/ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } while ((skb = __skb_dequeue(&errq))) kfree_skb(skb); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); dev->last_rx = jiffies; } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } *pbudget -= work_done; dev->quota -= work_done; if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __netif_rx_complete(dev); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return more_to_do | accel_more_to_do; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; (void)gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = 
gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } while ((skb = __skb_dequeue(&free_list)) != NULL) dev_kfree_skb(skb); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } /** * Description: * Get device-specific settings * Version: */ static int netfront_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { ecmd->supported = SUPPORTED_1000baseT_Full; ecmd->transceiver = XCVR_EXTERNAL; ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; ecmd->speed = SPEED_1000; ecmd->duplex = DUPLEX_FULL; return 0; } /** * Description: * Report whether Wake-on-Lan is enabled */ static void netfront_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; return; } /** * Description: * Report driver message level */ static u32 netfront_get_msglevel(struct net_device *dev) { struct netfront_info *adapter = netdev_priv(dev); adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; return adapter->msg_enable; } /* to support "ethtool -i" for getting driver information*/ static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netfront_info *adapter = netdev_priv(dev); strcpy(info->driver, XEN_VIF_DRIVER_NAME); strcpy(info->version, XEN_VIF_DRIVER_VERSION); strcpy(info->bus_info, adapter->xbdev->dev.bus_id); return; } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. 
Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { /** * Description: * Added some information for ethtool. 
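* These ops report fixed, synthetic link settings (1000baseT, full duplex,
* fibre) because a paravirtual VIF has no physical PHY to query. As an
* illustrative usage, "ethtool -i <ifname>" (substitute the VIF's interface
* name) goes through netfront_get_drvinfo() and reports driver "netfront",
* version "1.0" and the xenbus bus id.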
*/ .get_settings = netfront_get_settings, .get_wol = netfront_get_wol, .get_msglevel = netfront_get_msglevel, .get_drvinfo = netfront_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_target); } static const struct class_device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = class_device_create_file(&netdev->class_dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); } } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. 
*/ static void network_set_multicast_list(struct net_device *dev) { } static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->poll = netif_poll; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->set_mac_address = xennet_set_mac_address; netdev->change_mtu = xennet_change_mtu; netdev->weight = 64; netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
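* Carrier is dropped while holding both rx_lock and tx_lock so the data
* paths cannot race with the teardown: once netfront_carrier_off() is
* visible, the irq handler can be unbound and the shared rings released.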
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/RHEL5/xen-vnif/netfront.h000066400000000000000000000217261314037446600234500ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; /* added by l00183214 */ u16 msg_enable; /* end by l00183214 */ unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* added by l00183214 */ #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* Notified: insure that the length of XEN_VIF_DRIVER_NAME and XEN_VIF_DRIVER_VERSION are * both shorter than 32 characters, or it will cause out of bound of array. * More information please refer to the definition of struct ethtool_drvinfo in ethtool.h */ #define XEN_VIF_DRIVER_NAME "netfront" #define XEN_VIF_DRIVER_VERSION "1.0" /* end by l00183214 */ /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/000077500000000000000000000000001314037446600177665ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/Makefile000066400000000000000000000016101314037446600214240ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) COMPILEARGS = $(CROSSCOMPILE) obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all: make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) rm -rf Modules.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/000077500000000000000000000000001314037446600226725ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/asm-generic/000077500000000000000000000000001314037446600250645ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/asm-generic/pgtable-nopmd.h000066400000000000000000000005611314037446600277700ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/asm-generic/pgtable-nopud.h000066400000000000000000000006211314037446600277750ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/linux/000077500000000000000000000000001314037446600240315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/linux/io.h000066400000000000000000000002771314037446600246170ustar00rootroot00000000000000#ifndef 
_LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/linux/mutex.h000066400000000000000000000015411314037446600253450ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This file is released under the GPLv2. */ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/xen/000077500000000000000000000000001314037446600234645ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/compat-include/xen/platform-compat.h000066400000000000000000000124151314037446600267450ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
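* Both branches below therefore spell out the same fallback: kernels (or
* SLES releases before 10 SP1) that lack netif_tx_lock_bh() map it onto the
* old per-device xmit_lock spinlock.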
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) && CONFIG_SLE_VERSION < 10 #define setup_xen_features xen_setup_features #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/config.mk000066400000000000000000000017661314037446600215760ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) #EXTRA_CFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 #EXTRA_CFLAGS += -DCONFIG_XEN_COMPAT=0xffffff #EXTRA_CFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H #ifeq ($(ARCH),ia64) # EXTRA_CFLAGS += -DCONFIG_VMX_GUEST #endif # #EXTRA_CFLAGS += -include $(objtree)/include/linux/autoconf.h #AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H #_XEN_CPPFLAGS += -include $(AUTOCONF) EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/000077500000000000000000000000001314037446600214115ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/000077500000000000000000000000001314037446600226605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/8253pit.h000066400000000000000000000002371314037446600241510ustar00rootroot00000000000000/* * 8253/8254 Programmable Interval Timer */ #ifndef _8253PIT_H #define _8253PIT_H #include #define PIT_TICK_RATE CLOCK_TICK_RATE #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/a.out.h000066400000000000000000000015431314037446600240620ustar00rootroot00000000000000#ifndef __I386_A_OUT_H__ #define __I386_A_OUT_H__ #include struct exec { unsigned long a_info; /* Use macros N_MAGIC, etc for access */ unsigned a_text; /* length of text, in bytes */ unsigned a_data; /* length of data, in bytes */ unsigned a_bss; /* length of uninitialized data area for file, in bytes */ unsigned a_syms; /* length of symbol table 
data in file, in bytes */ unsigned a_entry; /* start address */ unsigned a_trsize; /* length of relocation info for text, in bytes */ unsigned a_drsize; /* length of relocation info for data, in bytes */ }; #define N_TRSIZE(a) ((a).a_trsize) #define N_DRSIZE(a) ((a).a_drsize) #define N_SYMSIZE(a) ((a).a_syms) #ifdef __KERNEL__ #ifndef CONFIG_XEN #define STACK_TOP TASK_SIZE #else #define STACK_TOP (TASK_SIZE - 3*PAGE_SIZE) #endif #endif #endif /* __A_OUT_GNU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/acpi.h000066400000000000000000000113471314037446600237530ustar00rootroot00000000000000/* * asm-i386/acpi.h * * Copyright (C) 2001 Paul Diefenbaugh * Copyright (C) 2001 Patrick Mochel * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H #ifdef __KERNEL__ #include #include /* defines cmpxchg */ #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long /* * Calling conventions: * * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) * ACPI_EXTERNAL_XFACE - External ACPI interfaces * ACPI_INTERNAL_XFACE - Internal ACPI interfaces * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces */ #define ACPI_SYSTEM_XFACE #define ACPI_EXTERNAL_XFACE #define ACPI_INTERNAL_XFACE #define ACPI_INTERNAL_VAR_XFACE /* Asm macros */ #define ACPI_ASM_MACROS #define BREAKPOINT3 #define ACPI_DISABLE_IRQS() local_irq_disable() #define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_FLUSH_CPU_CACHE() wbinvd() static inline int __acpi_acquire_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return (new < 3) ? 
-1 : 0; } static inline int __acpi_release_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = old & ~0x3; val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return old & 0x1; } #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) /* * Math helper asm macros */ #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ asm("divl %2;" \ :"=a"(q32), "=d"(r32) \ :"r"(d32), \ "0"(n_lo), "1"(n_hi)) #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ asm("shrl $1,%2;" \ "rcrl $1,%3;" \ :"=r"(n_hi), "=r"(n_lo) \ :"0"(n_hi), "1"(n_lo)) #ifdef CONFIG_ACPI extern int acpi_lapic; extern int acpi_ioapic; extern int acpi_noirq; extern int acpi_strict; extern int acpi_disabled; extern int acpi_ht; extern int acpi_pci_disabled; static inline void disable_acpi(void) { acpi_disabled = 1; acpi_ht = 0; acpi_pci_disabled = 1; acpi_noirq = 1; } /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ #define FIX_ACPI_PAGES 4 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); #ifdef CONFIG_X86_IO_APIC extern int skip_ioapic_setup; extern int acpi_skip_timer_override; extern void check_acpi_pci(void); static inline void disable_ioapic_setup(void) { skip_ioapic_setup = 1; } static inline int ioapic_setup_disabled(void) { return skip_ioapic_setup; } #else static inline void disable_ioapic_setup(void) { } static inline void check_acpi_pci(void) { } #endif static inline void acpi_noirq_set(void) { acpi_noirq = 1; } static inline void acpi_disable_pci(void) { acpi_pci_disabled = 1; acpi_noirq_set(); } extern int acpi_irq_balance_set(char *str); #else /* !CONFIG_ACPI */ #define acpi_lapic 0 #define acpi_ioapic 0 static inline void acpi_noirq_set(void) { } static inline void acpi_disable_pci(void) { } #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_SLEEP /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); extern void acpi_restore_state_mem(void); extern unsigned long acpi_wakeup_address; /* early initialization routine */ extern void acpi_reserve_bootmem(void); #ifdef CONFIG_ACPI_PV_SLEEP extern int acpi_notify_hypervisor_state(u8 sleep_state, u32 pm1a_cnt, u32 pm1b_cnt); #endif /* CONFIG_ACPI_PV_SLEEP */ #endif /*CONFIG_ACPI_SLEEP*/ extern u8 x86_acpiid_to_apicid[]; #ifndef CONFIG_XEN #define ARCH_HAS_POWER_INIT 1 #endif #endif /*__KERNEL__*/ #endif /*_ASM_ACPI_H*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/agp.h000066400000000000000000000021671314037446600236060ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include #include /* * Functions to keep the agpgart mappings coherent with the MMU. * The GART gives the CPU a physical alias of pages in memory. The alias region is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. This avoids * data corruption on some CPUs. */ int map_page_into_agp(struct page *page); int unmap_page_from_agp(struct page *page); #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() wbinvd() /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) (x) #define gart_to_phys(x) (x) /* GATT allocation. 
Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) #define free_gatt_pages(table, order) \ free_pages((unsigned long)(table), (order)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ansidecl.h000066400000000000000000000307731314037446600246250ustar00rootroot00000000000000/* ANSI and traditional C compatability macros Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of the GNU C Library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as * required. * Keith Owens 15 May 2006 */ /* ANSI and traditional C compatibility macros ANSI C is assumed if __STDC__ is #defined. Macro ANSI C definition Traditional C definition ----- ---- - ---------- ----------- - ---------- ANSI_PROTOTYPES 1 not defined PTR `void *' `char *' PTRCONST `void *const' `char *' LONG_DOUBLE `long double' `double' const not defined `' volatile not defined `' signed not defined `' VA_START(ap, var) va_start(ap, var) va_start(ap) Note that it is safe to write "void foo();" indicating a function with no return value, in all K+R compilers we have been able to test. For declaring functions with prototypes, we also provide these: PARAMS ((prototype)) -- for functions which take a fixed number of arguments. Use this when declaring the function. When defining the function, write a K+R style argument list. For example: char *strcpy PARAMS ((char *dest, char *source)); ... char * strcpy (dest, source) char *dest; char *source; { ... } VPARAMS ((prototype, ...)) -- for functions which take a variable number of arguments. Use PARAMS to declare the function, VPARAMS to define it. For example: int printf PARAMS ((const char *format, ...)); ... int printf VPARAMS ((const char *format, ...)) { ... } For writing functions which take variable numbers of arguments, we also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These hide the differences between K+R and C89 more thoroughly than the simple VA_START() macro mentioned above. VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end. Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls corresponding to the list of fixed arguments. Then use va_arg normally to get the variable arguments, or pass your va_list object around. You do not declare the va_list yourself; VA_OPEN does it for you. Here is a complete example: int printf VPARAMS ((const char *format, ...)) { int result; VA_OPEN (ap, format); VA_FIXEDARG (ap, const char *, format); result = vfprintf (stdout, format, ap); VA_CLOSE (ap); return result; } You can declare variables either before or after the VA_OPEN, VA_FIXEDARG sequence. 
Also, VA_OPEN and VA_CLOSE are the beginning and end of a block. They must appear at the same nesting level, and any variables declared after VA_OPEN go out of scope at VA_CLOSE. Unfortunately, with a K+R compiler, that includes the argument list. You can have multiple instances of VA_OPEN/VA_CLOSE pairs in a single function in case you need to traverse the argument list more than once. For ease of writing code which uses GCC extensions but needs to be portable to other compilers, we provide the GCC_VERSION macro that simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various wrappers around __attribute__. Also, __extension__ will be #defined to nothing if it doesn't work. See below. This header also defines a lot of obsolete macros: CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, AND, DOTS, NOARGS. Don't use them. */ #ifndef _ANSIDECL_H #define _ANSIDECL_H 1 /* Every source file includes this file, so they will all get the switch for lint. */ /* LINTLIBRARY */ /* Using MACRO(x,y) in cpp #if conditionals does not work with some older preprocessors. Thus we can't define something like this: #define HAVE_GCC_VERSION(MAJOR, MINOR) \ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) and then test "#if HAVE_GCC_VERSION(2,7)". So instead we use the macro below and test it against specific values. */ /* This macro simplifies testing whether we are using gcc, and if it is of a particular minimum version. (Both major & minor numbers are significant.) This macro will evaluate to 0 if we are not using gcc at all. */ #ifndef GCC_VERSION #define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) #endif /* GCC_VERSION */ #if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) /* All known AIX compilers implement these things (but don't always define __STDC__). The RISC/OS MIPS compiler defines these things in SVR4 mode, but does not define __STDC__. */ /* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other C++ compilers, does not define __STDC__, though it acts as if this was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ #define ANSI_PROTOTYPES 1 #define PTR void * #define PTRCONST void *const #define LONG_DOUBLE long double /* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in a #ifndef. */ #ifndef PARAMS #define PARAMS(ARGS) ARGS #endif #define VPARAMS(ARGS) ARGS #define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) /* variadic function helper macros */ /* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's use without inhibiting further decls and without declaring an actual variable. */ #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, T, N) struct Qdmy #undef const #undef volatile #undef signed #ifdef __KERNEL__ #ifndef __STDC_VERSION__ #define __STDC_VERSION__ 0 #endif #endif /* __KERNEL__ */ /* inline requires special treatment; it's in C99, and GCC >=2.7 supports it too, but it's not in C89. */ #undef inline #if __STDC_VERSION__ > 199901L /* it's a keyword */ #else # if GCC_VERSION >= 2007 # define inline __inline__ /* __inline__ prevents -pedantic warnings */ # else # define inline /* nothing */ # endif #endif /* These are obsolete. Do not use. 
*/ #ifndef IN_GCC #define CONST const #define VOLATILE volatile #define SIGNED signed #define PROTO(type, name, arglist) type name arglist #define EXFUN(name, proto) name proto #define DEFUN(name, arglist, args) name(args) #define DEFUN_VOID(name) name(void) #define AND , #define DOTS , ... #define NOARGS void #endif /* ! IN_GCC */ #else /* Not ANSI C. */ #undef ANSI_PROTOTYPES #define PTR char * #define PTRCONST PTR #define LONG_DOUBLE double #define PARAMS(args) () #define VPARAMS(args) (va_alist) va_dcl #define VA_START(va_list, var) va_start(va_list) #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) /* some systems define these in header files for non-ansi mode */ #undef const #undef volatile #undef signed #undef inline #define const #define volatile #define signed #define inline #ifndef IN_GCC #define CONST #define VOLATILE #define SIGNED #define PROTO(type, name, arglist) type name () #define EXFUN(name, proto) name() #define DEFUN(name, arglist, args) name arglist args; #define DEFUN_VOID(name) name() #define AND ; #define DOTS #define NOARGS #endif /* ! IN_GCC */ #endif /* ANSI C. */ /* Define macros for some gcc attributes. This permits us to use the macros freely, and know that they will come into play for the version of gcc in which they are supported. */ #if (GCC_VERSION < 2007) # define __attribute__(x) #endif /* Attribute __malloc__ on functions was valid as of gcc 2.96. */ #ifndef ATTRIBUTE_MALLOC # if (GCC_VERSION >= 2096) # define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) # else # define ATTRIBUTE_MALLOC # endif /* GNUC >= 2.96 */ #endif /* ATTRIBUTE_MALLOC */ /* Attributes on labels were valid as of gcc 2.93. */ #ifndef ATTRIBUTE_UNUSED_LABEL # if (!defined (__cplusplus) && GCC_VERSION >= 2093) # define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED # else # define ATTRIBUTE_UNUSED_LABEL # endif /* !__cplusplus && GNUC >= 2.93 */ #endif /* ATTRIBUTE_UNUSED_LABEL */ #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif /* ATTRIBUTE_UNUSED */ /* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the identifier name. */ #if ! defined(__cplusplus) || (GCC_VERSION >= 3004) # define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED #else /* !__cplusplus || GNUC >= 3.4 */ # define ARG_UNUSED(NAME) NAME #endif /* !__cplusplus || GNUC >= 3.4 */ #ifndef ATTRIBUTE_NORETURN #define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) #endif /* ATTRIBUTE_NORETURN */ /* Attribute `nonnull' was valid as of gcc 3.3. */ #ifndef ATTRIBUTE_NONNULL # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) # else # define ATTRIBUTE_NONNULL(m) # endif /* GNUC >= 3.3 */ #endif /* ATTRIBUTE_NONNULL */ /* Attribute `pure' was valid as of gcc 3.0. */ #ifndef ATTRIBUTE_PURE # if (GCC_VERSION >= 3000) # define ATTRIBUTE_PURE __attribute__ ((__pure__)) # else # define ATTRIBUTE_PURE # endif /* GNUC >= 3.0 */ #endif /* ATTRIBUTE_PURE */ /* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. This was the case for the `printf' format attribute by itself before GCC 3.3, but as of 3.3 we need to add the `nonnull' attribute to retain this behavior. 
*/ #ifndef ATTRIBUTE_PRINTF #define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) #define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) #define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) #define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) #define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) #define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) #endif /* ATTRIBUTE_PRINTF */ /* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on a function pointer. Format attributes were allowed on function pointers as of gcc 3.1. */ #ifndef ATTRIBUTE_FPTR_PRINTF # if (GCC_VERSION >= 3001) # define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) # else # define ATTRIBUTE_FPTR_PRINTF(m, n) # endif /* GNUC >= 3.1 */ # define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) # define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) # define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) # define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) # define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) #endif /* ATTRIBUTE_FPTR_PRINTF */ /* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A NULL format specifier was allowed as of gcc 3.3. */ #ifndef ATTRIBUTE_NULL_PRINTF # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) # else # define ATTRIBUTE_NULL_PRINTF(m, n) # endif /* GNUC >= 3.3 */ # define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) # define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) # define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) # define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) # define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) #endif /* ATTRIBUTE_NULL_PRINTF */ /* Attribute `sentinel' was valid as of gcc 3.5. */ #ifndef ATTRIBUTE_SENTINEL # if (GCC_VERSION >= 3005) # define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) # else # define ATTRIBUTE_SENTINEL # endif /* GNUC >= 3.5 */ #endif /* ATTRIBUTE_SENTINEL */ #ifndef ATTRIBUTE_ALIGNED_ALIGNOF # if (GCC_VERSION >= 3000) # define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) # else # define ATTRIBUTE_ALIGNED_ALIGNOF(m) # endif /* GNUC >= 3.0 */ #endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ /* We use __extension__ in some places to suppress -pedantic warnings about GCC extensions. This feature didn't work properly before gcc 2.8. */ #if GCC_VERSION < 2008 #define __extension__ #endif #endif /* ansidecl.h */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/apic.h000066400000000000000000000066441314037446600237570ustar00rootroot00000000000000#ifndef __ASM_APIC_H #define __ASM_APIC_H #include #include #include #include #include #include #define Dprintk(x...) /* * Debugging macros */ #define APIC_QUIET 0 #define APIC_VERBOSE 1 #define APIC_DEBUG 2 extern int enable_local_apic; extern int apic_verbosity; static inline void lapic_disable(void) { enable_local_apic = -1; clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); } static inline void lapic_enable(void) { enable_local_apic = 1; } /* * Define the default level of output to be very little * This can be turned up by using apic=verbose for more * information and apic=debug for _lots_ of information. * apic_verbosity is defined in apic.c */ #define apic_printk(v, s, a...) 
do { \ if ((v) <= apic_verbosity) \ printk(s, ##a); \ } while (0) #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) #include static __inline__ void apic_wait_icr_idle(void) { while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) cpu_relax(); } int get_physical_broadcast(void); #ifdef CONFIG_X86_GOOD_APIC # define FORCE_READ_AROUND_WRITE 0 # define apic_read_around(x) # define apic_write_around(x,y) apic_write((x),(y)) #else # define FORCE_READ_AROUND_WRITE 1 # define apic_read_around(x) apic_read(x) # define apic_write_around(x,y) apic_write_atomic((x),(y)) #endif static inline void ack_APIC_irq(void) { /* * ack_APIC_irq() actually gets compiled as a single instruction: * - a single rmw on Pentium/82489DX * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) * ... yummie. */ /* Docs say use 0 for future compatibility */ apic_write_around(APIC_EOI, 0); } extern void (*wait_timer_tick)(void); extern int get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC (void); extern void disconnect_bsp_APIC (int virt_wire_setup); extern void disable_local_APIC (void); extern void lapic_shutdown (void); extern int verify_local_APIC (void); extern void cache_APIC_registers (void); extern void sync_Arb_IDs (void); extern void init_bsp_APIC (void); extern void setup_local_APIC (void); extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); extern void setup_apic_nmi_watchdog (void); extern int reserve_lapic_nmi(void); extern void release_lapic_nmi(void); extern void disable_timer_nmi_watchdog(void); extern void enable_timer_nmi_watchdog(void); extern void nmi_watchdog_tick (struct pt_regs * regs); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void enable_NMI_through_LVT0 (void * dummy); extern unsigned int nmi_watchdog; #define NMI_NONE 0 #define NMI_IO_APIC 1 #define NMI_LOCAL_APIC 2 #define NMI_INVALID 3 extern int disable_timer_pin_1; void smp_send_timer_broadcast_ipi(struct pt_regs *regs); void switch_APIC_timer_to_ipi(void *cpumask); void switch_ipi_to_APIC_timer(void *cpumask); #define ARCH_APICTIMER_STOPS_ON_C3 1 extern int timer_over_8254; extern int modern_apic(void); extern void dmi_check_apic(void); #else /* !CONFIG_X86_LOCAL_APIC || CONFIG_XEN */ static inline void lapic_shutdown(void) { } #ifdef CONFIG_X86_LOCAL_APIC extern int APIC_init_uniprocessor (void); #endif #endif /* !CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/apicdef.h000066400000000000000000000236151314037446600244330ustar00rootroot00000000000000#ifndef __ASM_APICDEF_H #define __ASM_APICDEF_H #ifndef CONFIG_XEN /* * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) * * Alan Cox , 1995. 
* Ingo Molnar , 1999, 2000 */ #define APIC_DEFAULT_PHYS_BASE 0xfee00000 #define APIC_ID 0x20 #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFF) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) #define APIC_INTEGRATED(x) ((x)&0xF0) #define APIC_XAPIC(x) ((x) >= 0x14) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFF #define APIC_ARBPRI 0x90 #define APIC_ARBPRI_MASK 0xFF #define APIC_PROCPRI 0xA0 #define APIC_EOI 0xB0 #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ #define APIC_RRR 0xC0 #define APIC_LDR 0xD0 #define APIC_LDR_MASK (0xFF<<24) #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) #define APIC_ALL_CPUS 0xFF #define APIC_DFR 0xE0 #define APIC_DFR_CLUSTER 0x0FFFFFFFul #define APIC_DFR_FLAT 0xFFFFFFFFul #define APIC_SPIV 0xF0 #define APIC_SPIV_FOCUS_DISABLED (1<<9) #define APIC_SPIV_APIC_ENABLED (1<<8) #define APIC_ISR 0x100 #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ #define APIC_TMR 0x180 #define APIC_IRR 0x200 #define APIC_ESR 0x280 #define APIC_ESR_SEND_CS 0x00001 #define APIC_ESR_RECV_CS 0x00002 #define APIC_ESR_SEND_ACC 0x00004 #define APIC_ESR_RECV_ACC 0x00008 #define APIC_ESR_SENDILL 0x00020 #define APIC_ESR_RECVILL 0x00040 #define APIC_ESR_ILLREGA 0x00080 #define APIC_ICR 0x300 #define APIC_DEST_SELF 0x40000 #define APIC_DEST_ALLINC 0x80000 #define APIC_DEST_ALLBUT 0xC0000 #define APIC_ICR_RR_MASK 0x30000 #define APIC_ICR_RR_INVALID 0x00000 #define APIC_ICR_RR_INPROG 0x10000 #define APIC_ICR_RR_VALID 0x20000 #define APIC_INT_LEVELTRIG 0x08000 #define APIC_INT_ASSERT 0x04000 #define APIC_ICR_BUSY 0x01000 #define APIC_DEST_LOGICAL 0x00800 #define APIC_DM_FIXED 0x00000 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 #define APIC_DM_NMI 0x00400 #define APIC_DM_INIT 0x00500 #define APIC_DM_STARTUP 0x00600 #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) #define SET_APIC_DEST_FIELD(x) ((x)<<24) #define APIC_LVTT 0x320 #define APIC_LVTTHMR 0x330 #define APIC_LVTPC 0x340 #define APIC_LVT0 0x350 #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) #define SET_APIC_TIMER_BASE(x) (((x)<<18)) #define APIC_TIMER_BASE_CLKIN 0x0 #define APIC_TIMER_BASE_TMBASE 0x1 #define APIC_TIMER_BASE_DIV 0x2 #define APIC_LVT_TIMER_PERIODIC (1<<17) #define APIC_LVT_MASKED (1<<16) #define APIC_LVT_LEVEL_TRIGGER (1<<15) #define APIC_LVT_REMOTE_IRR (1<<14) #define APIC_INPUT_POLARITY (1<<13) #define APIC_SEND_PENDING (1<<12) #define APIC_MODE_MASK 0x700 #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 #define APIC_MODE_EXTINT 0x7 #define APIC_LVT1 0x360 #define APIC_LVTERR 0x370 #define APIC_TMICT 0x380 #define APIC_TMCCT 0x390 #define APIC_TDCR 0x3E0 #define APIC_TDR_DIV_TMBASE (1<<2) #define APIC_TDR_DIV_1 0xB #define APIC_TDR_DIV_2 0x0 #define APIC_TDR_DIV_4 0x1 #define APIC_TDR_DIV_8 0x2 #define APIC_TDR_DIV_16 0x3 #define APIC_TDR_DIV_32 0x8 #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #else /* CONFIG_XEN */ enum { APIC_DEST_ALLBUT = 0x1, APIC_DEST_SELF, APIC_DEST_ALLINC }; #endif /* CONFIG_XEN */ #define MAX_IO_APICS 64 #ifndef CONFIG_XEN #define XAPIC_DEST_CPUS_SHIFT 4 #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) #define 
XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) #define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) /* * the local APIC register structure, memory mapped. Not terribly well * tested, but we might eventually use this one in the future - the * problem why we cannot use it right now is the P5 APIC, it has an * errata which cannot take 8-bit reads and writes, only 32-bit ones ... */ #define u32 unsigned int #define lapic ((volatile struct local_apic *)APIC_BASE) struct local_apic { /*000*/ struct { u32 __reserved[4]; } __reserved_01; /*010*/ struct { u32 __reserved[4]; } __reserved_02; /*020*/ struct { /* APIC ID Register */ u32 __reserved_1 : 24, phys_apic_id : 4, __reserved_2 : 4; u32 __reserved[3]; } id; /*030*/ const struct { /* APIC Version Register */ u32 version : 8, __reserved_1 : 8, max_lvt : 8, __reserved_2 : 8; u32 __reserved[3]; } version; /*040*/ struct { u32 __reserved[4]; } __reserved_03; /*050*/ struct { u32 __reserved[4]; } __reserved_04; /*060*/ struct { u32 __reserved[4]; } __reserved_05; /*070*/ struct { u32 __reserved[4]; } __reserved_06; /*080*/ struct { /* Task Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } tpr; /*090*/ const struct { /* Arbitration Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } apr; /*0A0*/ const struct { /* Processor Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } ppr; /*0B0*/ struct { /* End Of Interrupt Register */ u32 eoi; u32 __reserved[3]; } eoi; /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; /*0D0*/ struct { /* Logical Destination Register */ u32 __reserved_1 : 24, logical_dest : 8; u32 __reserved_2[3]; } ldr; /*0E0*/ struct { /* Destination Format Register */ u32 __reserved_1 : 28, model : 4; u32 __reserved_2[3]; } dfr; /*0F0*/ struct { /* Spurious Interrupt Vector Register */ u32 spurious_vector : 8, apic_enabled : 1, focus_cpu : 1, __reserved_2 : 22; u32 __reserved_3[3]; } svr; /*100*/ struct { /* In Service Register */ /*170*/ u32 bitfield; u32 __reserved[3]; } isr [8]; /*180*/ struct { /* Trigger Mode Register */ /*1F0*/ u32 bitfield; u32 __reserved[3]; } tmr [8]; /*200*/ struct { /* Interrupt Request Register */ /*270*/ u32 bitfield; u32 __reserved[3]; } irr [8]; /*280*/ union { /* Error Status Register */ struct { u32 send_cs_error : 1, receive_cs_error : 1, send_accept_error : 1, receive_accept_error : 1, __reserved_1 : 1, send_illegal_vector : 1, receive_illegal_vector : 1, illegal_register_address : 1, __reserved_2 : 24; u32 __reserved_3[3]; } error_bits; struct { u32 errors; u32 __reserved_3[3]; } all_errors; } esr; /*290*/ struct { u32 __reserved[4]; } __reserved_08; /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; /*300*/ struct { /* Interrupt Command Register 1 */ u32 vector : 8, delivery_mode : 3, destination_mode : 1, delivery_status : 1, __reserved_1 : 1, level : 1, trigger : 1, __reserved_2 : 2, shorthand : 2, __reserved_3 : 12; u32 __reserved_4[3]; } icr1; /*310*/ struct { /* Interrupt Command Register 2 */ union { 
u32 __reserved_1 : 24, phys_dest : 4, __reserved_2 : 4; u32 __reserved_3 : 24, logical_dest : 8; } dest; u32 __reserved_4[3]; } icr2; /*320*/ struct { /* LVT - Timer */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, timer_mode : 1, __reserved_3 : 14; u32 __reserved_4[3]; } lvt_timer; /*330*/ struct { /* LVT - Thermal Sensor */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_thermal; /*340*/ struct { /* LVT - Performance Counter */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_pc; /*350*/ struct { /* LVT - LINT0 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint0; /*360*/ struct { /* LVT - LINT1 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint1; /*370*/ struct { /* LVT - Error */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_error; /*380*/ struct { /* Timer Initial Count Register */ u32 initial_count; u32 __reserved_2[3]; } timer_icr; /*390*/ const struct { /* Timer Current Count Register */ u32 curr_count; u32 __reserved_2[3]; } timer_ccr; /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; /*3E0*/ struct { /* Timer Divide Configuration Register */ u32 divisor : 4, __reserved_1 : 28; u32 __reserved_2[3]; } timer_dcr; /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; } __attribute__ ((packed)); #undef u32 #endif /* CONFIG_XEN */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/arch_hooks.h000066400000000000000000000066221314037446600251570ustar00rootroot00000000000000#ifndef _ASM_ARCH_HOOKS_H #define _ASM_ARCH_HOOKS_H #include #include #include /* * linux/include/asm/arch_hooks.h * * define the architecture specific hooks */ /* these aren't arch hooks, they are generic routines * that can be used by the hooks */ extern void init_ISA_irqs(void); extern void apic_intr_init(void); extern void smp_intr_init(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); /* * There are also some generic structures used by most architectures. * * IRQ0 is the default timer interrupt * IRQ2 is cascade interrupt to second interrupt controller */ extern struct irqaction irq0; extern struct irqaction irq2; static inline void legacy_intr_init_hook(void) { if (!acpi_ioapic) setup_irq(2, &irq2); } static inline void default_timer_init(void) { setup_irq(0, &irq0); } /* include to allow sub-arch override */ #include /* these are the default hooks */ /** * pre_intr_init_hook - initialisation prior to setting up interrupt vectors * * Description: * Perform any necessary interrupt initialisation prior to setting up * the "ordinary" interrupt call gates. For legacy reasons, the ISA * interrupts should be initialised here if the machine emulates a PC * in any way. 
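A sub-architecture hooks header included above may supply its own definition before the #ifndef default below is reached, for example (mach_custom_intr_init is only an illustrative name):

        #define pre_intr_init_hook() mach_custom_intr_init()

Otherwise the generic default falls back to init_ISA_irqs().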
**/ #ifndef pre_intr_init_hook #define pre_intr_init_hook() init_ISA_irqs() #endif /** * intr_init_hook - post gate setup interrupt initialisation * * Description: * Fill in any interrupts that may have been left out by the general * init_IRQ() routine. interrupts having to do with the machine rather * than the devices on the I/O bus (like APIC interrupts in intel MP * systems) are started here. **/ #ifndef intr_init_hook #define intr_init_hook() legacy_intr_init_hook() #endif /** * pre_setup_arch_hook - hook called prior to any setup_arch() execution * * Description: * generally used to activate any machine specific identification * routines that may be needed before setup_arch() runs. On VISWS * this is used to get the board revision and type. **/ #ifndef pre_setup_arch_hook #define pre_setup_arch_hook() #endif /** * post_setup_arch_hook - hook called after any setup_arch() execution * * Description: * Provides a hook point immediate after setup_arch completes. **/ #ifndef post_setup_arch_hook #define post_setup_arch_hook() #endif /** * trap_init_hook - initialise system specific traps * * Description: * Called as the final act of trap_init(). Used in VISWS to initialise * the various board specific APIC traps. **/ #ifndef trap_init_hook #define trap_init_hook() #endif /** * time_init_hook - do any specific initialisations for the system timer. * * Description: * Must plug the system timer interrupt source at HZ into the IRQ listed * in irq_vectors.h:TIMER_IRQ **/ #ifndef time_init_hook #define time_init_hook() default_timer_init() #endif /** * smpboot_startup_ipi_hook - hook to set AP state prior to startup IPI * * Description: * Used in VMI to allow hypervisors to setup a known initial state on * coprocessors, rather than booting from real mode. **/ #ifndef smpboot_startup_ipi_hook #define smpboot_startup_ipi_hook(...) #endif /** * module_finalize_hook - perform module fixups required by subarch * * Description: * Used in VMI to apply annotations to kernel modules **/ #ifndef module_finalize_hook #define module_finalize_hook(...) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/atomic.h000066400000000000000000000134001314037446600243030ustar00rootroot00000000000000#ifndef __ARCH_I386_ATOMIC__ #define __ARCH_I386_ATOMIC__ #include #include #include #include /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ /* * Make sure gcc doesn't try to be clever and move things around * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value * * Atomically sets the value of @v to @i. */ #define atomic_set(v,i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v. */ static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( LOCK "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v. 
*/ static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( LOCK "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "subl %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t * * Atomically increments @v by 1. */ static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( LOCK "incl %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t * * Atomically decrements @v by 1. */ static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( LOCK "decl %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ static __inline__ int atomic_dec_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "decl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_inc_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "incl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic_add_negative - add and test if negative * @v: pointer of type atomic_t * @i: integer value to add * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. */ static __inline__ int atomic_add_negative(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "addl %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_add_return - add and return * @v: pointer of type atomic_t * @i: integer value to add * * Atomically adds @i to @v and returns @i + @v */ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i; #ifdef CONFIG_M386 if(unlikely(boot_cpu_data.x86==3)) goto no_xadd; #endif /* Modern 486+ processor */ __i = i; __asm__ __volatile__( LOCK "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; #ifdef CONFIG_M386 no_xadd: /* Legacy 386 processor */ local_irq_disable(); __i = atomic_read(v); atomic_set(v, i + __i); local_irq_enable(); return i + __i; #endif } static __inline__ int atomic_sub_return(int i, atomic_t *v) { return atomic_add_return(-i,v); } #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** * atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
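A typical use is taking a reference only while an object is still live, e.g. (refcnt is an illustrative atomic_t member, not a field defined here):

        if (!atomic_add_unless(&obj->refcnt, 1, 0))
                return NULL;

which is exactly the pattern wrapped by atomic_inc_not_zero() defined below.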
*/ #define atomic_add_unless(v, a, u) \ ({ \ int c, old; \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ c = old; \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ __asm__ __volatile__(LOCK "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") #define atomic_set_mask(mask, addr) \ __asm__ __volatile__(LOCK "orl %0,%1" \ : : "r" (mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/auxvec.h000066400000000000000000000003551314037446600243270ustar00rootroot00000000000000#ifndef __ASMi386_AUXVEC_H #define __ASMi386_AUXVEC_H /* * Architecture-neutral AT_ values in 0-17, leave some room * for more of them, start the x86-specific ones at 32. */ #define AT_SYSINFO 32 #define AT_SYSINFO_EHDR 33 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/bfd.h000066400000000000000000005026621314037446600235770ustar00rootroot00000000000000/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c", "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c", "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c", "linker.c" and "simple.c". Run "make headers" in your build bfd/ to regenerate. */ /* Main header file for the bfd library -- portable access to object files. Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. Contributed by Cygnus Support. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as * required. * Keith Owens 15 May 2006 */ #ifndef __BFD_H_SEEN__ #define __BFD_H_SEEN__ #ifdef __cplusplus extern "C" { #endif #ifdef __KERNEL__ #include #else /* __KERNEL__ */ #include "ansidecl.h" #include "symcat.h" #endif /* __KERNEL__ */ #if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) #ifndef SABER /* This hack is to avoid a problem with some strict ANSI C preprocessors. The problem is, "32_" is not a valid preprocessing token, and we don't want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will cause the inner CONCAT2 macros to be evaluated first, producing still-valid pp-tokens. 
Then the final concatenation can be done. */ #undef CONCAT4 #define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d)) #endif #endif /* The word size used by BFD on the host. This may be 64 with a 32 bit target if the host is 64 bit, or if other 64 bit targets have been selected with --enable-targets, or if --enable-64-bit-bfd. */ #ifdef __KERNEL__ #define BFD_ARCH_SIZE 32 #else /* __KERNEL__ */ #define BFD_ARCH_SIZE 64 #endif /* __KERNEL__ */ /* The word size of the default bfd target. */ #define BFD_DEFAULT_TARGET_SIZE 32 #define BFD_HOST_64BIT_LONG 0 #define BFD_HOST_LONG_LONG 1 #if 1 #define BFD_HOST_64_BIT long long #define BFD_HOST_U_64_BIT unsigned long long typedef BFD_HOST_64_BIT bfd_int64_t; typedef BFD_HOST_U_64_BIT bfd_uint64_t; #endif #if BFD_ARCH_SIZE >= 64 #define BFD64 #endif #ifndef INLINE #if __GNUC__ >= 2 #define INLINE __inline__ #else #define INLINE #endif #endif /* Forward declaration. */ typedef struct bfd bfd; /* Boolean type used in bfd. Too many systems define their own versions of "boolean" for us to safely typedef a "boolean" of our own. Using an enum for "bfd_boolean" has its own set of problems, with strange looking casts required to avoid warnings on some older compilers. Thus we just use an int. General rule: Functions which are bfd_boolean return TRUE on success and FALSE on failure (unless they're a predicate). */ typedef int bfd_boolean; #undef FALSE #undef TRUE #define FALSE 0 #define TRUE 1 #ifdef BFD64 #ifndef BFD_HOST_64_BIT #error No 64 bit integer type available #endif /* ! defined (BFD_HOST_64_BIT) */ typedef BFD_HOST_U_64_BIT bfd_vma; typedef BFD_HOST_64_BIT bfd_signed_vma; typedef BFD_HOST_U_64_BIT bfd_size_type; typedef BFD_HOST_U_64_BIT symvalue; #ifndef fprintf_vma #if BFD_HOST_64BIT_LONG #define sprintf_vma(s,x) sprintf (s, "%016lx", x) #define fprintf_vma(f,x) fprintf (f, "%016lx", x) #else #define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff))) #define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff)) #define fprintf_vma(s,x) \ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) #define sprintf_vma(s,x) \ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) #endif #endif #else /* not BFD64 */ /* Represent a target address. Also used as a generic unsigned type which is guaranteed to be big enough to hold any arithmetic types we need to deal with. */ typedef unsigned long bfd_vma; /* A generic signed type which is guaranteed to be big enough to hold any arithmetic types we need to deal with. Can be assumed to be compatible with bfd_vma in the same way that signed and unsigned ints are compatible (as parameters, in assignment, etc). */ typedef long bfd_signed_vma; typedef unsigned long symvalue; typedef unsigned long bfd_size_type; /* Print a bfd_vma x on stream s. */ #define fprintf_vma(s,x) fprintf (s, "%08lx", x) #define sprintf_vma(s,x) sprintf (s, "%08lx", x) #endif /* not BFD64 */ #define HALF_BFD_SIZE_TYPE \ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2)) #ifndef BFD_HOST_64_BIT /* Fall back on a 32 bit type. The idea is to make these types always available for function return types, but in the case that BFD_HOST_64_BIT is undefined such a function should abort or otherwise signal an error. */ typedef bfd_signed_vma bfd_int64_t; typedef bfd_vma bfd_uint64_t; #endif /* An offset into a file. BFD always uses the largest possible offset based on the build time availability of fseek, fseeko, or fseeko64. 
*/ typedef BFD_HOST_64_BIT file_ptr; typedef unsigned BFD_HOST_64_BIT ufile_ptr; extern void bfd_sprintf_vma (bfd *, char *, bfd_vma); extern void bfd_fprintf_vma (bfd *, void *, bfd_vma); #define printf_vma(x) fprintf_vma(stdout,x) #define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x) typedef unsigned int flagword; /* 32 bits of flags */ typedef unsigned char bfd_byte; /* File formats. */ typedef enum bfd_format { bfd_unknown = 0, /* File format is unknown. */ bfd_object, /* Linker/assembler/compiler output. */ bfd_archive, /* Object archive file. */ bfd_core, /* Core dump. */ bfd_type_end /* Marks the end; don't use it! */ } bfd_format; /* Values that may appear in the flags field of a BFD. These also appear in the object_flags field of the bfd_target structure, where they indicate the set of flags used by that backend (not all flags are meaningful for all object file formats) (FIXME: at the moment, the object_flags values have mostly just been copied from backend to another, and are not necessarily correct). */ /* No flags. */ #define BFD_NO_FLAGS 0x00 /* BFD contains relocation entries. */ #define HAS_RELOC 0x01 /* BFD is directly executable. */ #define EXEC_P 0x02 /* BFD has line number information (basically used for F_LNNO in a COFF header). */ #define HAS_LINENO 0x04 /* BFD has debugging information. */ #define HAS_DEBUG 0x08 /* BFD has symbols. */ #define HAS_SYMS 0x10 /* BFD has local symbols (basically used for F_LSYMS in a COFF header). */ #define HAS_LOCALS 0x20 /* BFD is a dynamic object. */ #define DYNAMIC 0x40 /* Text section is write protected (if D_PAGED is not set, this is like an a.out NMAGIC file) (the linker sets this by default, but clears it for -r or -N). */ #define WP_TEXT 0x80 /* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the linker sets this by default, but clears it for -r or -n or -N). */ #define D_PAGED 0x100 /* BFD is relaxable (this means that bfd_relax_section may be able to do something) (sometimes bfd_relax_section can do something even if this is not set). */ #define BFD_IS_RELAXABLE 0x200 /* This may be set before writing out a BFD to request using a traditional format. For example, this is used to request that when writing out an a.out object the symbols not be hashed to eliminate duplicates. */ #define BFD_TRADITIONAL_FORMAT 0x400 /* This flag indicates that the BFD contents are actually cached in memory. If this is set, iostream points to a bfd_in_memory struct. */ #define BFD_IN_MEMORY 0x800 /* The sections in this BFD specify a memory page. */ #define HAS_LOAD_PAGE 0x1000 /* This BFD has been created by the linker and doesn't correspond to any input file. */ #define BFD_LINKER_CREATED 0x2000 /* Symbols and relocation. */ /* A count of carsyms (canonical archive symbols). */ typedef unsigned long symindex; /* How to perform a relocation. */ typedef const struct reloc_howto_struct reloc_howto_type; #define BFD_NO_MORE_SYMBOLS ((symindex) ~0) /* General purpose part of a symbol X; target specific parts are in libcoff.h, libaout.h, etc. 
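Given an asymbol pointer sym (for instance one obtained via bfd_canonicalize_symtab), the accessors defined below yield its basic properties, e.g. in a minimal sketch:

        const char *name = bfd_asymbol_name (sym);
        bfd_vma     addr = bfd_asymbol_value (sym);

where bfd_asymbol_value is the symbol's value biased by the vma of the section returned by bfd_get_section (sym).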
*/ #define bfd_get_section(x) ((x)->section) #define bfd_get_output_section(x) ((x)->section->output_section) #define bfd_set_section(x,y) ((x)->section) = (y) #define bfd_asymbol_base(x) ((x)->section->vma) #define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value) #define bfd_asymbol_name(x) ((x)->name) /*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/ #define bfd_asymbol_bfd(x) ((x)->the_bfd) #define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour) /* A canonical archive symbol. */ /* This is a type pun with struct ranlib on purpose! */ typedef struct carsym { char *name; file_ptr file_offset; /* Look here to find the file. */ } carsym; /* To make these you call a carsymogen. */ /* Used in generating armaps (archive tables of contents). Perhaps just a forward definition would do? */ struct orl /* Output ranlib. */ { char **name; /* Symbol name. */ union { file_ptr pos; bfd *abfd; } u; /* bfd* or file position. */ int namidx; /* Index into string table. */ }; /* Linenumber stuff. */ typedef struct lineno_cache_entry { unsigned int line_number; /* Linenumber from start of function. */ union { struct bfd_symbol *sym; /* Function name. */ bfd_vma offset; /* Offset into section. */ } u; } alent; /* Object and core file sections. */ #define align_power(addr, align) \ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align))) typedef struct bfd_section *sec_ptr; #define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0) #define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0) #define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0) #define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0) #define bfd_section_name(bfd, ptr) ((ptr)->name) #define bfd_section_size(bfd, ptr) ((ptr)->size) #define bfd_get_section_size(ptr) ((ptr)->size) #define bfd_section_vma(bfd, ptr) ((ptr)->vma) #define bfd_section_lma(bfd, ptr) ((ptr)->lma) #define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power) #define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0) #define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata) #define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) #define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) #define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) #define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) /* Find the address one past the end of SEC. */ #define bfd_get_section_limit(bfd, sec) \ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \ / bfd_octets_per_byte (bfd)) typedef struct stat stat_type; typedef enum bfd_print_symbol { bfd_print_symbol_name, bfd_print_symbol_more, bfd_print_symbol_all } bfd_print_symbol_type; /* Information about a symbol that nm needs. */ typedef struct _symbol_info { symvalue value; char type; const char *name; /* Symbol name. */ unsigned char stab_type; /* Stab type. */ char stab_other; /* Stab other. */ short stab_desc; /* Stab desc. */ const char *stab_name; /* String for stab type. */ } symbol_info; /* Get the name of a stabs type code. */ extern const char *bfd_get_stab_name (int); /* Hash table routines. There is no way to free up a hash table. */ /* An element in the hash table. Most uses will actually use a larger structure, and an instance of this will be the first field. */ struct bfd_hash_entry { /* Next entry for this hash code. */ struct bfd_hash_entry *next; /* String being hashed. */ const char *string; /* Hash code. 
This is the full hash code, not the index into the table. */ unsigned long hash; }; /* A hash table. */ struct bfd_hash_table { /* The hash array. */ struct bfd_hash_entry **table; /* The number of slots in the hash table. */ unsigned int size; /* A function used to create new elements in the hash table. The first entry is itself a pointer to an element. When this function is first invoked, this pointer will be NULL. However, having the pointer permits a hierarchy of method functions to be built each of which calls the function in the superclass. Thus each function should be written to allocate a new block of memory only if the argument is NULL. */ struct bfd_hash_entry *(*newfunc) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); /* An objalloc for this hash table. This is a struct objalloc *, but we use void * to avoid requiring the inclusion of objalloc.h. */ void *memory; }; /* Initialize a hash table. */ extern bfd_boolean bfd_hash_table_init (struct bfd_hash_table *, struct bfd_hash_entry *(*) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *)); /* Initialize a hash table specifying a size. */ extern bfd_boolean bfd_hash_table_init_n (struct bfd_hash_table *, struct bfd_hash_entry *(*) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *), unsigned int size); /* Free up a hash table. */ extern void bfd_hash_table_free (struct bfd_hash_table *); /* Look up a string in a hash table. If CREATE is TRUE, a new entry will be created for this string if one does not already exist. The COPY argument must be TRUE if this routine should copy the string into newly allocated memory when adding an entry. */ extern struct bfd_hash_entry *bfd_hash_lookup (struct bfd_hash_table *, const char *, bfd_boolean create, bfd_boolean copy); /* Replace an entry in a hash table. */ extern void bfd_hash_replace (struct bfd_hash_table *, struct bfd_hash_entry *old, struct bfd_hash_entry *nw); /* Base method for creating a hash table entry. */ extern struct bfd_hash_entry *bfd_hash_newfunc (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); /* Grab some space for a hash table entry. */ extern void *bfd_hash_allocate (struct bfd_hash_table *, unsigned int); /* Traverse a hash table in a random order, calling a function on each element. If the function returns FALSE, the traversal stops. The INFO argument is passed to the function. */ extern void bfd_hash_traverse (struct bfd_hash_table *, bfd_boolean (*) (struct bfd_hash_entry *, void *), void *info); /* Allows the default size of a hash table to be configured. New hash tables allocated using bfd_hash_table_init will be created with this size. */ extern void bfd_hash_set_default_size (bfd_size_type); /* This structure is used to keep track of stabs in sections information while linking. */ struct stab_info { /* A hash table used to hold stabs strings. */ struct bfd_strtab_hash *strings; /* The header file hash table. */ struct bfd_hash_table includes; /* The first .stabstr section. */ struct bfd_section *stabstr; }; #define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table /* User program access to BFD facilities. */ /* Direct I/O routines, for programs which know more about the object file than BFD does. Use higher level routines if possible. 
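For instance, a caller that already knows a file offset can pull raw bytes out of an open bfd with the routines declared below (abfd, offset, len and buf are all caller-supplied in this sketch):

        if (bfd_seek (abfd, (file_ptr) offset, SEEK_SET) == 0
            && bfd_bread (buf, (bfd_size_type) len, abfd) == len)
          ... buf now holds the requested bytes ...

bfd_seek returns 0 on success and bfd_bread returns the number of bytes actually read.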
*/ extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *); extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *); extern int bfd_seek (bfd *, file_ptr, int); extern file_ptr bfd_tell (bfd *); extern int bfd_flush (bfd *); extern int bfd_stat (bfd *, struct stat *); /* Deprecated old routines. */ #if __GNUC__ #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #else #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #endif extern void warn_deprecated (const char *, const char *, int, const char *); /* Cast from const char * to char * so that caller can assign to a char * without a warning. */ #define bfd_get_filename(abfd) ((char *) (abfd)->filename) #define bfd_get_cacheable(abfd) ((abfd)->cacheable) #define bfd_get_format(abfd) ((abfd)->format) #define bfd_get_target(abfd) ((abfd)->xvec->name) #define bfd_get_flavour(abfd) ((abfd)->xvec->flavour) #define bfd_family_coff(abfd) \ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour) #define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG) #define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE) #define bfd_header_big_endian(abfd) \ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG) #define bfd_header_little_endian(abfd) \ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE) #define bfd_get_file_flags(abfd) ((abfd)->flags) #define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags) #define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags) #define bfd_my_archive(abfd) ((abfd)->my_archive) #define bfd_has_map(abfd) ((abfd)->has_armap) #define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types) #define bfd_usrdata(abfd) ((abfd)->usrdata) #define bfd_get_start_address(abfd) ((abfd)->start_address) #define bfd_get_symcount(abfd) ((abfd)->symcount) #define bfd_get_outsymbols(abfd) ((abfd)->outsymbols) #define bfd_count_sections(abfd) ((abfd)->section_count) #define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount) #define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) #define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) extern bfd_boolean bfd_cache_close (bfd *abfd); /* NB: This declaration should match the autogenerated one in libbfd.h. */ extern bfd_boolean bfd_cache_close_all (void); extern bfd_boolean bfd_record_phdr (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma, bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **); /* Byte swapping routines. 
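These read and write fixed-size integers of a given endianness from raw memory. For example, converting a 32-bit little-endian word in a buffer to big-endian in place (buf must point at four valid bytes):

        bfd_vma v = bfd_getl32 (buf);
        bfd_putb32 (v, buf);

The bfd_get_bits/bfd_put_bits forms further below take the size and endianness as run-time arguments instead.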
*/ bfd_uint64_t bfd_getb64 (const void *); bfd_uint64_t bfd_getl64 (const void *); bfd_int64_t bfd_getb_signed_64 (const void *); bfd_int64_t bfd_getl_signed_64 (const void *); bfd_vma bfd_getb32 (const void *); bfd_vma bfd_getl32 (const void *); bfd_signed_vma bfd_getb_signed_32 (const void *); bfd_signed_vma bfd_getl_signed_32 (const void *); bfd_vma bfd_getb16 (const void *); bfd_vma bfd_getl16 (const void *); bfd_signed_vma bfd_getb_signed_16 (const void *); bfd_signed_vma bfd_getl_signed_16 (const void *); void bfd_putb64 (bfd_uint64_t, void *); void bfd_putl64 (bfd_uint64_t, void *); void bfd_putb32 (bfd_vma, void *); void bfd_putl32 (bfd_vma, void *); void bfd_putb16 (bfd_vma, void *); void bfd_putl16 (bfd_vma, void *); /* Byte swapping routines which take size and endiannes as arguments. */ bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean); void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean); extern bfd_boolean bfd_section_already_linked_table_init (void); extern void bfd_section_already_linked_table_free (void); /* Externally visible ECOFF routines. */ #if defined(__STDC__) || defined(ALMOST_STDC) struct ecoff_debug_info; struct ecoff_debug_swap; struct ecoff_extr; struct bfd_symbol; struct bfd_link_info; struct bfd_link_hash_entry; struct bfd_elf_version_tree; #endif extern bfd_vma bfd_ecoff_get_gp_value (bfd * abfd); extern bfd_boolean bfd_ecoff_set_gp_value (bfd *abfd, bfd_vma gp_value); extern bfd_boolean bfd_ecoff_set_regmasks (bfd *abfd, unsigned long gprmask, unsigned long fprmask, unsigned long *cprmask); extern void *bfd_ecoff_debug_init (bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); extern void bfd_ecoff_debug_free (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_accumulate (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, bfd *input_bfd, struct ecoff_debug_info *input_debug, const struct ecoff_debug_swap *input_swap, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_accumulate_other (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, bfd *input_bfd, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_externals (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, bfd_boolean relocatable, bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *), void (*set_index) (struct bfd_symbol *, bfd_size_type)); extern bfd_boolean bfd_ecoff_debug_one_external (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, const char *name, struct ecoff_extr *esym); extern bfd_size_type bfd_ecoff_debug_size (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap); extern bfd_boolean bfd_ecoff_write_debug (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, file_ptr where); extern bfd_boolean bfd_ecoff_write_accumulated_debug (void *handle, bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, struct bfd_link_info *info, file_ptr where); /* Externally visible ELF routines. 
*/ struct bfd_link_needed_list { struct bfd_link_needed_list *next; bfd *by; const char *name; }; enum dynamic_lib_link_class { DYN_NORMAL = 0, DYN_AS_NEEDED = 1, DYN_DT_NEEDED = 2, DYN_NO_ADD_NEEDED = 4, DYN_NO_NEEDED = 8 }; extern bfd_boolean bfd_elf_record_link_assignment (struct bfd_link_info *, const char *, bfd_boolean); extern struct bfd_link_needed_list *bfd_elf_get_needed_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf_get_bfd_needed_list (bfd *, struct bfd_link_needed_list **); extern bfd_boolean bfd_elf_size_dynamic_sections (bfd *, const char *, const char *, const char *, const char * const *, struct bfd_link_info *, struct bfd_section **, struct bfd_elf_version_tree *); extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr (bfd *, struct bfd_link_info *); extern void bfd_elf_set_dt_needed_name (bfd *, const char *); extern const char *bfd_elf_get_dt_soname (bfd *); extern void bfd_elf_set_dyn_lib_class (bfd *, int); extern int bfd_elf_get_dyn_lib_class (bfd *); extern struct bfd_link_needed_list *bfd_elf_get_runpath_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf_discard_info (bfd *, struct bfd_link_info *); extern unsigned int _bfd_elf_default_action_discarded (struct bfd_section *); /* Return an upper bound on the number of bytes required to store a copy of ABFD's program header table entries. Return -1 if an error occurs; bfd_get_error will return an appropriate code. */ extern long bfd_get_elf_phdr_upper_bound (bfd *abfd); /* Copy ABFD's program header table entries to *PHDRS. The entries will be stored as an array of Elf_Internal_Phdr structures, as defined in include/elf/internal.h. To find out how large the buffer needs to be, call bfd_get_elf_phdr_upper_bound. Return the number of program header table entries read, or -1 if an error occurs; bfd_get_error will return an appropriate code. */ extern int bfd_get_elf_phdrs (bfd *abfd, void *phdrs); /* Create a new BFD as if by bfd_openr. Rather than opening a file, reconstruct an ELF file by reading the segments out of remote memory based on the ELF file header at EHDR_VMA and the ELF program headers it points to. If not null, *LOADBASEP is filled in with the difference between the VMAs from which the segments were read, and the VMAs the file headers (and hence BFD's idea of each section's VMA) put them at. The function TARGET_READ_MEMORY is called to copy LEN bytes from the remote memory at target address VMA into the local buffer at MYADDR; it should return zero on success or an `errno' code on failure. TEMPL must be a BFD for an ELF target with the word size and byte order found in the remote memory. */ extern bfd *bfd_elf_bfd_from_remote_memory (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len)); /* Return the arch_size field of an elf bfd, or -1 if not elf. */ extern int bfd_get_arch_size (bfd *); /* Return TRUE if address "naturally" sign extends, or -1 if not elf. 
*/ extern int bfd_get_sign_extend_vma (bfd *); extern struct bfd_section *_bfd_elf_tls_setup (bfd *, struct bfd_link_info *); extern void _bfd_elf_provide_symbol (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *); extern void _bfd_elf_provide_section_bound_symbols (struct bfd_link_info *, struct bfd_section *, const char *, const char *); extern void _bfd_elf_fix_excluded_sec_syms (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); /* SunOS shared library support routines for the linker. */ extern struct bfd_link_needed_list *bfd_sunos_get_needed_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_sunos_record_link_assignment (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_sunos_size_dynamic_sections (bfd *, struct bfd_link_info *, struct bfd_section **, struct bfd_section **, struct bfd_section **); /* Linux shared library support routines for the linker. */ extern bfd_boolean bfd_i386linux_size_dynamic_sections (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_m68klinux_size_dynamic_sections (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_sparclinux_size_dynamic_sections (bfd *, struct bfd_link_info *); /* mmap hacks */ struct _bfd_window_internal; typedef struct _bfd_window_internal bfd_window_internal; typedef struct _bfd_window { /* What the user asked for. */ void *data; bfd_size_type size; /* The actual window used by BFD. Small user-requested read-only regions sharing a page may share a single window into the object file. Read-write versions shouldn't until I've fixed things to keep track of which portions have been claimed by the application; don't want to give the same region back when the application wants two writable copies! */ struct _bfd_window_internal *i; } bfd_window; extern void bfd_init_window (bfd_window *); extern void bfd_free_window (bfd_window *); extern bfd_boolean bfd_get_file_window (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean); /* XCOFF support routines for the linker. */ extern bfd_boolean bfd_xcoff_link_record_set (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type); extern bfd_boolean bfd_xcoff_import_symbol (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma, const char *, const char *, const char *, unsigned int); extern bfd_boolean bfd_xcoff_export_symbol (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *); extern bfd_boolean bfd_xcoff_link_count_reloc (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_xcoff_record_link_assignment (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_xcoff_size_dynamic_sections (bfd *, struct bfd_link_info *, const char *, const char *, unsigned long, unsigned long, unsigned long, bfd_boolean, int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean); extern bfd_boolean bfd_xcoff_link_generate_rtinit (bfd *, const char *, const char *, bfd_boolean); /* XCOFF support routines for ar. */ extern bfd_boolean bfd_xcoff_ar_archive_set_magic (bfd *, char *); /* Externally visible COFF routines. 
*/ #if defined(__STDC__) || defined(ALMOST_STDC) struct internal_syment; union internal_auxent; #endif extern bfd_boolean bfd_coff_get_syment (bfd *, struct bfd_symbol *, struct internal_syment *); extern bfd_boolean bfd_coff_get_auxent (bfd *, struct bfd_symbol *, int, union internal_auxent *); extern bfd_boolean bfd_coff_set_symbol_class (bfd *, struct bfd_symbol *, unsigned int); extern bfd_boolean bfd_m68k_coff_create_embedded_relocs (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); /* ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_arm_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_arm_process_before_allocation (bfd *, struct bfd_link_info *, int); extern bfd_boolean bfd_arm_get_bfd_for_interworking (bfd *, struct bfd_link_info *); /* PE ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_arm_pe_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_arm_pe_process_before_allocation (bfd *, struct bfd_link_info *, int); extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking (bfd *, struct bfd_link_info *); /* ELF ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_elf32_arm_process_before_allocation (bfd *, struct bfd_link_info *, int); void bfd_elf32_arm_set_target_relocs (struct bfd_link_info *, int, char *, int, int); extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd (bfd *, struct bfd_link_info *); /* ELF ARM mapping symbol support */ extern bfd_boolean bfd_is_arm_mapping_symbol_name (const char * name); /* ARM Note section processing. */ extern bfd_boolean bfd_arm_merge_machines (bfd *, bfd *); extern bfd_boolean bfd_arm_update_notes (bfd *, const char *); extern unsigned int bfd_arm_get_mach_from_notes (bfd *, const char *); /* TI COFF load page support. */ extern void bfd_ticoff_set_section_load_page (struct bfd_section *, int); extern int bfd_ticoff_get_section_load_page (struct bfd_section *); /* H8/300 functions. */ extern bfd_vma bfd_h8300_pad_address (bfd *, bfd_vma); /* IA64 Itanium code generation. Called from linker. */ extern void bfd_elf32_ia64_after_parse (int); extern void bfd_elf64_ia64_after_parse (int); /* This structure is used for a comdat section, as in PE. A comdat section is associated with a particular symbol. When the linker sees a comdat section, it keeps only one of the sections with a given name and associated with a given symbol. */ struct coff_comdat_info { /* The name of the symbol associated with a comdat section. */ const char *name; /* The local symbol table index of the symbol associated with a comdat section. This is only meaningful to the object file format specific code; it is not an index into the list returned by bfd_canonicalize_symtab. */ long symbol; }; extern struct coff_comdat_info *bfd_coff_get_comdat_section (bfd *, struct bfd_section *); /* Extracted from init.c. */ void bfd_init (void); /* Extracted from opncls.c. 
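The open/close entry points declared below are the usual way to obtain a bfd in the first place; a minimal sketch (filename is caller-supplied, a NULL target selects the default):

        bfd *abfd = bfd_openr (filename, NULL);
        if (abfd != NULL)
          {
            if (bfd_check_format (abfd, bfd_object))
              ... inspect sections and symbols here ...
            bfd_close (abfd);
          }

bfd_check_format itself is declared further down, in the part extracted from format.c.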
*/ bfd *bfd_fopen (const char *filename, const char *target, const char *mode, int fd); bfd *bfd_openr (const char *filename, const char *target); bfd *bfd_fdopenr (const char *filename, const char *target, int fd); bfd *bfd_openstreamr (const char *, const char *, void *); bfd *bfd_openr_iovec (const char *filename, const char *target, void *(*open) (struct bfd *nbfd, void *open_closure), void *open_closure, file_ptr (*pread) (struct bfd *nbfd, void *stream, void *buf, file_ptr nbytes, file_ptr offset), int (*close) (struct bfd *nbfd, void *stream)); bfd *bfd_openw (const char *filename, const char *target); bfd_boolean bfd_close (bfd *abfd); bfd_boolean bfd_close_all_done (bfd *); bfd *bfd_create (const char *filename, bfd *templ); bfd_boolean bfd_make_writable (bfd *abfd); bfd_boolean bfd_make_readable (bfd *abfd); unsigned long bfd_calc_gnu_debuglink_crc32 (unsigned long crc, const unsigned char *buf, bfd_size_type len); char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir); struct bfd_section *bfd_create_gnu_debuglink_section (bfd *abfd, const char *filename); bfd_boolean bfd_fill_in_gnu_debuglink_section (bfd *abfd, struct bfd_section *sect, const char *filename); /* Extracted from libbfd.c. */ /* Byte swapping macros for user section data. */ #define bfd_put_8(abfd, val, ptr) \ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff)) #define bfd_put_signed_8 \ bfd_put_8 #define bfd_get_8(abfd, ptr) \ (*(unsigned char *) (ptr) & 0xff) #define bfd_get_signed_8(abfd, ptr) \ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80) #define bfd_put_16(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx16, ((val),(ptr))) #define bfd_put_signed_16 \ bfd_put_16 #define bfd_get_16(abfd, ptr) \ BFD_SEND (abfd, bfd_getx16, (ptr)) #define bfd_get_signed_16(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_16, (ptr)) #define bfd_put_32(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx32, ((val),(ptr))) #define bfd_put_signed_32 \ bfd_put_32 #define bfd_get_32(abfd, ptr) \ BFD_SEND (abfd, bfd_getx32, (ptr)) #define bfd_get_signed_32(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_32, (ptr)) #define bfd_put_64(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx64, ((val), (ptr))) #define bfd_put_signed_64 \ bfd_put_64 #define bfd_get_64(abfd, ptr) \ BFD_SEND (abfd, bfd_getx64, (ptr)) #define bfd_get_signed_64(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_64, (ptr)) #define bfd_get(bits, abfd, ptr) \ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \ : (abort (), (bfd_vma) - 1)) #define bfd_put(bits, abfd, val, ptr) \ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \ : (abort (), (void) 0)) /* Byte swapping macros for file header data. 
*/ #define bfd_h_put_8(abfd, val, ptr) \ bfd_put_8 (abfd, val, ptr) #define bfd_h_put_signed_8(abfd, val, ptr) \ bfd_put_8 (abfd, val, ptr) #define bfd_h_get_8(abfd, ptr) \ bfd_get_8 (abfd, ptr) #define bfd_h_get_signed_8(abfd, ptr) \ bfd_get_signed_8 (abfd, ptr) #define bfd_h_put_16(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx16, (val, ptr)) #define bfd_h_put_signed_16 \ bfd_h_put_16 #define bfd_h_get_16(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx16, (ptr)) #define bfd_h_get_signed_16(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr)) #define bfd_h_put_32(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx32, (val, ptr)) #define bfd_h_put_signed_32 \ bfd_h_put_32 #define bfd_h_get_32(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx32, (ptr)) #define bfd_h_get_signed_32(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr)) #define bfd_h_put_64(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx64, (val, ptr)) #define bfd_h_put_signed_64 \ bfd_h_put_64 #define bfd_h_get_64(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx64, (ptr)) #define bfd_h_get_signed_64(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr)) /* Aliases for the above, which should eventually go away. */ #define H_PUT_64 bfd_h_put_64 #define H_PUT_32 bfd_h_put_32 #define H_PUT_16 bfd_h_put_16 #define H_PUT_8 bfd_h_put_8 #define H_PUT_S64 bfd_h_put_signed_64 #define H_PUT_S32 bfd_h_put_signed_32 #define H_PUT_S16 bfd_h_put_signed_16 #define H_PUT_S8 bfd_h_put_signed_8 #define H_GET_64 bfd_h_get_64 #define H_GET_32 bfd_h_get_32 #define H_GET_16 bfd_h_get_16 #define H_GET_8 bfd_h_get_8 #define H_GET_S64 bfd_h_get_signed_64 #define H_GET_S32 bfd_h_get_signed_32 #define H_GET_S16 bfd_h_get_signed_16 #define H_GET_S8 bfd_h_get_signed_8 /* Extracted from bfdio.c. */ long bfd_get_mtime (bfd *abfd); long bfd_get_size (bfd *abfd); /* Extracted from bfdwin.c. */ /* Extracted from section.c. */ typedef struct bfd_section { /* The name of the section; the name isn't a copy, the pointer is the same as that passed to bfd_make_section. */ const char *name; /* A unique sequence number. */ int id; /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */ int index; /* The next section in the list belonging to the BFD, or NULL. */ struct bfd_section *next; /* The previous section in the list belonging to the BFD, or NULL. */ struct bfd_section *prev; /* The field flags contains attributes of the section. Some flags are read in from the object file, and some are synthesized from other information. */ flagword flags; #define SEC_NO_FLAGS 0x000 /* Tells the OS to allocate space for this section when loading. This is clear for a section containing debug information only. */ #define SEC_ALLOC 0x001 /* Tells the OS to load the section from the file when loading. This is clear for a .bss section. */ #define SEC_LOAD 0x002 /* The section contains data still to be relocated, so there is some relocation information too. */ #define SEC_RELOC 0x004 /* A signal to the OS that the section contains read only data. */ #define SEC_READONLY 0x008 /* The section contains code only. */ #define SEC_CODE 0x010 /* The section contains data only. */ #define SEC_DATA 0x020 /* The section will reside in ROM. */ #define SEC_ROM 0x040 /* The section contains constructor information. This section type is used by the linker to create lists of constructors and destructors used by <>. 
When a back end sees a symbol which should be used in a constructor list, it creates a new section for the type of name (e.g., <<__CTOR_LIST__>>), attaches the symbol to it, and builds a relocation. To build the lists of constructors, all the linker has to do is catenate all the sections called <<__CTOR_LIST__>> and relocate the data contained within - exactly the operations it would peform on standard data. */ #define SEC_CONSTRUCTOR 0x080 /* The section has contents - a data section could be <> | <>; a debug section could be <> */ #define SEC_HAS_CONTENTS 0x100 /* An instruction to the linker to not output the section even if it has information which would normally be written. */ #define SEC_NEVER_LOAD 0x200 /* The section contains thread local data. */ #define SEC_THREAD_LOCAL 0x400 /* The section has GOT references. This flag is only for the linker, and is currently only used by the elf32-hppa back end. It will be set if global offset table references were detected in this section, which indicate to the linker that the section contains PIC code, and must be handled specially when doing a static link. */ #define SEC_HAS_GOT_REF 0x800 /* The section contains common symbols (symbols may be defined multiple times, the value of a symbol is the amount of space it requires, and the largest symbol value is the one used). Most targets have exactly one of these (which we translate to bfd_com_section_ptr), but ECOFF has two. */ #define SEC_IS_COMMON 0x1000 /* The section contains only debugging information. For example, this is set for ELF .debug and .stab sections. strip tests this flag to see if a section can be discarded. */ #define SEC_DEBUGGING 0x2000 /* The contents of this section are held in memory pointed to by the contents field. This is checked by bfd_get_section_contents, and the data is retrieved from memory if appropriate. */ #define SEC_IN_MEMORY 0x4000 /* The contents of this section are to be excluded by the linker for executable and shared objects unless those objects are to be further relocated. */ #define SEC_EXCLUDE 0x8000 /* The contents of this section are to be sorted based on the sum of the symbol and addend values specified by the associated relocation entries. Entries without associated relocation entries will be appended to the end of the section in an unspecified order. */ #define SEC_SORT_ENTRIES 0x10000 /* When linking, duplicate sections of the same name should be discarded, rather than being combined into a single section as is usually done. This is similar to how common symbols are handled. See SEC_LINK_DUPLICATES below. */ #define SEC_LINK_ONCE 0x20000 /* If SEC_LINK_ONCE is set, this bitfield describes how the linker should handle duplicate sections. */ #define SEC_LINK_DUPLICATES 0x40000 /* This value for SEC_LINK_DUPLICATES means that duplicate sections with the same name should simply be discarded. */ #define SEC_LINK_DUPLICATES_DISCARD 0x0 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if there are any duplicate sections, although it should still only link one copy. */ #define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if any duplicate sections are a different size. */ #define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if any duplicate sections contain different contents. 
*/ #define SEC_LINK_DUPLICATES_SAME_CONTENTS \ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE) /* This section was created by the linker as part of dynamic relocation or other arcane processing. It is skipped when going through the first-pass output, trusting that someone else up the line will take care of it later. */ #define SEC_LINKER_CREATED 0x200000 /* This section should not be subject to garbage collection. */ #define SEC_KEEP 0x400000 /* This section contains "short" data, and should be placed "near" the GP. */ #define SEC_SMALL_DATA 0x800000 /* Attempt to merge identical entities in the section. Entity size is given in the entsize field. */ #define SEC_MERGE 0x1000000 /* If given with SEC_MERGE, entities to merge are zero terminated strings where entsize specifies character size instead of fixed size entries. */ #define SEC_STRINGS 0x2000000 /* This section contains data about section groups. */ #define SEC_GROUP 0x4000000 /* The section is a COFF shared library section. This flag is only for the linker. If this type of section appears in the input file, the linker must copy it to the output file without changing the vma or size. FIXME: Although this was originally intended to be general, it really is COFF specific (and the flag was renamed to indicate this). It might be cleaner to have some more general mechanism to allow the back end to control what the linker does with sections. */ #define SEC_COFF_SHARED_LIBRARY 0x10000000 /* This section contains data which may be shared with other executables or shared objects. This is for COFF only. */ #define SEC_COFF_SHARED 0x20000000 /* When a section with this flag is being linked, then if the size of the input section is less than a page, it should not cross a page boundary. If the size of the input section is one page or more, it should be aligned on a page boundary. This is for TI TMS320C54X only. */ #define SEC_TIC54X_BLOCK 0x40000000 /* Conditionally link this section; do not link if there are no references found to any symbol in the section. This is for TI TMS320C54X only. */ #define SEC_TIC54X_CLINK 0x80000000 /* End of section flags. */ /* Some internal packed boolean fields. */ /* See the vma field. */ unsigned int user_set_vma : 1; /* A mark flag used by some of the linker backends. */ unsigned int linker_mark : 1; /* Another mark flag used by some of the linker backends. Set for output sections that have an input section. */ unsigned int linker_has_input : 1; /* Mark flags used by some linker backends for garbage collection. */ unsigned int gc_mark : 1; unsigned int gc_mark_from_eh : 1; /* The following flags are used by the ELF linker. */ /* Mark sections which have been allocated to segments. */ unsigned int segment_mark : 1; /* Type of sec_info information. */ unsigned int sec_info_type:3; #define ELF_INFO_TYPE_NONE 0 #define ELF_INFO_TYPE_STABS 1 #define ELF_INFO_TYPE_MERGE 2 #define ELF_INFO_TYPE_EH_FRAME 3 #define ELF_INFO_TYPE_JUST_SYMS 4 /* Nonzero if this section uses RELA relocations, rather than REL. */ unsigned int use_rela_p:1; /* Bits used by various backends. The generic code doesn't touch these fields. */ /* Nonzero if this section has TLS related relocations. */ unsigned int has_tls_reloc:1; /* Nonzero if this section has a gp reloc. */ unsigned int has_gp_reloc:1; /* Nonzero if this section needs the relax finalize pass. */ unsigned int need_finalize_relax:1; /* Whether relocations have been processed. */ unsigned int reloc_done : 1; /* End of internal packed boolean fields. 
*/ /* The virtual memory address of the section - where it will be at run time. The symbols are relocated against this. The user_set_vma flag is maintained by bfd; if it's not set, the backend can assign addresses (for example, in <>, where the default address for <<.data>> is dependent on the specific target and various flags). */ bfd_vma vma; /* The load address of the section - where it would be in a rom image; really only used for writing section header information. */ bfd_vma lma; /* The size of the section in octets, as it will be output. Contains a value even if the section has no contents (e.g., the size of <<.bss>>). */ bfd_size_type size; /* For input sections, the original size on disk of the section, in octets. This field is used by the linker relaxation code. It is currently only set for sections where the linker relaxation scheme doesn't cache altered section and reloc contents (stabs, eh_frame, SEC_MERGE, some coff relaxing targets), and thus the original size needs to be kept to read the section multiple times. For output sections, rawsize holds the section size calculated on a previous linker relaxation pass. */ bfd_size_type rawsize; /* If this section is going to be output, then this value is the offset in *bytes* into the output section of the first byte in the input section (byte ==> smallest addressable unit on the target). In most cases, if this was going to start at the 100th octet (8-bit quantity) in the output section, this value would be 100. However, if the target byte size is 16 bits (bfd_octets_per_byte is "2"), this value would be 50. */ bfd_vma output_offset; /* The output section through which to map on output. */ struct bfd_section *output_section; /* The alignment requirement of the section, as an exponent of 2 - e.g., 3 aligns to 2^3 (or 8). */ unsigned int alignment_power; /* If an input section, a pointer to a vector of relocation records for the data in this section. */ struct reloc_cache_entry *relocation; /* If an output section, a pointer to a vector of pointers to relocation records for the data in this section. */ struct reloc_cache_entry **orelocation; /* The number of relocation records in one of the above. */ unsigned reloc_count; /* Information below is back end specific - and not always used or updated. */ /* File position of section data. */ file_ptr filepos; /* File position of relocation info. */ file_ptr rel_filepos; /* File position of line data. */ file_ptr line_filepos; /* Pointer to data for applications. */ void *userdata; /* If the SEC_IN_MEMORY flag is set, this points to the actual contents. */ unsigned char *contents; /* Attached line number information. */ alent *lineno; /* Number of line number records. */ unsigned int lineno_count; /* Entity size for merging purposes. */ unsigned int entsize; /* Points to the kept section if this section is a link-once section, and is discarded. */ struct bfd_section *kept_section; /* When a section is being output, this value changes as more linenumbers are written out. */ file_ptr moving_line_filepos; /* What the section number is in the target world. */ int target_index; void *used_by_bfd; /* If this is a constructor section then here is a list of the relocations created to relocate items within it. */ struct relent_chain *constructor_chain; /* The BFD which owns the section. */ bfd *owner; /* A symbol which points at this section only. 
*/ struct bfd_symbol *symbol; struct bfd_symbol **symbol_ptr_ptr; /* Early in the link process, map_head and map_tail are used to build a list of input sections attached to an output section. Later, output sections use these fields for a list of bfd_link_order structs. */ union { struct bfd_link_order *link_order; struct bfd_section *s; } map_head, map_tail; } asection; /* These sections are global, and are managed by BFD. The application and target back end are not permitted to change the values in these sections. New code should use the section_ptr macros rather than referring directly to the const sections. The const sections may eventually vanish. */ #define BFD_ABS_SECTION_NAME "*ABS*" #define BFD_UND_SECTION_NAME "*UND*" #define BFD_COM_SECTION_NAME "*COM*" #define BFD_IND_SECTION_NAME "*IND*" /* The absolute section. */ extern asection bfd_abs_section; #define bfd_abs_section_ptr ((asection *) &bfd_abs_section) #define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr) /* Pointer to the undefined section. */ extern asection bfd_und_section; #define bfd_und_section_ptr ((asection *) &bfd_und_section) #define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr) /* Pointer to the common section. */ extern asection bfd_com_section; #define bfd_com_section_ptr ((asection *) &bfd_com_section) /* Pointer to the indirect section. */ extern asection bfd_ind_section; #define bfd_ind_section_ptr ((asection *) &bfd_ind_section) #define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr) #define bfd_is_const_section(SEC) \ ( ((SEC) == bfd_abs_section_ptr) \ || ((SEC) == bfd_und_section_ptr) \ || ((SEC) == bfd_com_section_ptr) \ || ((SEC) == bfd_ind_section_ptr)) extern const struct bfd_symbol * const bfd_abs_symbol; extern const struct bfd_symbol * const bfd_com_symbol; extern const struct bfd_symbol * const bfd_und_symbol; extern const struct bfd_symbol * const bfd_ind_symbol; /* Macros to handle insertion and deletion of a bfd's sections. These only handle the list pointers, ie. do not adjust section_count, target_index etc. 
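The section flag bits and the reserved absolute, undefined, common and indirect sections defined above are what most clients test when deciding how to treat a section. A small illustrative helper follows; it is a sketch, not part of the original header, and relies only on the declarations above.

// Sketch: classify a section using the flag bits and the reserved-section
// predicates declared above.
static const char *
describe_section (asection *sec)
{
  if (bfd_is_und_section (sec))
    return "undefined";
  if (bfd_is_abs_section (sec))
    return "absolute";
  if (sec->flags & SEC_IS_COMMON)
    return "common";
  if ((sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
    return "loadable code";
  if (sec->flags & SEC_DEBUGGING)
    return "debugging info only";
  return "other";
}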
*/ #define bfd_section_list_remove(ABFD, S) \ do \ { \ asection *_s = S; \ asection *_next = _s->next; \ asection *_prev = _s->prev; \ if (_prev) \ _prev->next = _next; \ else \ (ABFD)->sections = _next; \ if (_next) \ _next->prev = _prev; \ else \ (ABFD)->section_last = _prev; \ } \ while (0) #define bfd_section_list_append(ABFD, S) \ do \ { \ asection *_s = S; \ bfd *_abfd = ABFD; \ _s->next = NULL; \ if (_abfd->section_last) \ { \ _s->prev = _abfd->section_last; \ _abfd->section_last->next = _s; \ } \ else \ { \ _s->prev = NULL; \ _abfd->sections = _s; \ } \ _abfd->section_last = _s; \ } \ while (0) #define bfd_section_list_prepend(ABFD, S) \ do \ { \ asection *_s = S; \ bfd *_abfd = ABFD; \ _s->prev = NULL; \ if (_abfd->sections) \ { \ _s->next = _abfd->sections; \ _abfd->sections->prev = _s; \ } \ else \ { \ _s->next = NULL; \ _abfd->section_last = _s; \ } \ _abfd->sections = _s; \ } \ while (0) #define bfd_section_list_insert_after(ABFD, A, S) \ do \ { \ asection *_a = A; \ asection *_s = S; \ asection *_next = _a->next; \ _s->next = _next; \ _s->prev = _a; \ _a->next = _s; \ if (_next) \ _next->prev = _s; \ else \ (ABFD)->section_last = _s; \ } \ while (0) #define bfd_section_list_insert_before(ABFD, B, S) \ do \ { \ asection *_b = B; \ asection *_s = S; \ asection *_prev = _b->prev; \ _s->prev = _prev; \ _s->next = _b; \ _b->prev = _s; \ if (_prev) \ _prev->next = _s; \ else \ (ABFD)->sections = _s; \ } \ while (0) #define bfd_section_removed_from_list(ABFD, S) \ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S)) void bfd_section_list_clear (bfd *); asection *bfd_get_section_by_name (bfd *abfd, const char *name); asection *bfd_get_section_by_name_if (bfd *abfd, const char *name, bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj), void *obj); char *bfd_get_unique_section_name (bfd *abfd, const char *templat, int *count); asection *bfd_make_section_old_way (bfd *abfd, const char *name); asection *bfd_make_section_anyway_with_flags (bfd *abfd, const char *name, flagword flags); asection *bfd_make_section_anyway (bfd *abfd, const char *name); asection *bfd_make_section_with_flags (bfd *, const char *name, flagword flags); asection *bfd_make_section (bfd *, const char *name); bfd_boolean bfd_set_section_flags (bfd *abfd, asection *sec, flagword flags); void bfd_map_over_sections (bfd *abfd, void (*func) (bfd *abfd, asection *sect, void *obj), void *obj); asection *bfd_sections_find_if (bfd *abfd, bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj), void *obj); bfd_boolean bfd_set_section_size (bfd *abfd, asection *sec, bfd_size_type val); bfd_boolean bfd_set_section_contents (bfd *abfd, asection *section, const void *data, file_ptr offset, bfd_size_type count); bfd_boolean bfd_get_section_contents (bfd *abfd, asection *section, void *location, file_ptr offset, bfd_size_type count); bfd_boolean bfd_malloc_and_get_section (bfd *abfd, asection *section, bfd_byte **buf); bfd_boolean bfd_copy_private_section_data (bfd *ibfd, asection *isec, bfd *obfd, asection *osec); #define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \ BFD_SEND (obfd, _bfd_copy_private_section_data, \ (ibfd, isection, obfd, osection)) bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec); bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group); /* Extracted from archures.c. */ enum bfd_architecture { bfd_arch_unknown, /* File arch not known. */ bfd_arch_obscure, /* Arch known, not one of these. 
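bfd_map_over_sections together with bfd_get_section_contents or bfd_malloc_and_get_section, declared just above, is the usual way to walk a file's sections and pull their raw bytes. The sketch below is illustrative only, not part of the original header; it assumes an already opened, format-checked object BFD and the standard C library.

// Sketch: print every section, and read the bytes of those that have
// contents stored in the file.
#include <stdio.h>
#include <stdlib.h>

static void
show_section (bfd *abfd, asection *sec, void *obj)
{
  (void) obj;                                   // unused user pointer

  printf ("%-20s vma 0x%lx size %lu flags 0x%lx\n",
          sec->name,
          (unsigned long) sec->vma,
          (unsigned long) sec->size,
          (unsigned long) sec->flags);

  if (sec->flags & SEC_HAS_CONTENTS)
    {
      bfd_byte *data = NULL;
      if (bfd_malloc_and_get_section (abfd, sec, &data))
        {
          // ... inspect data[0 .. sec->size - 1] here ...
        }
      free (data);                              // free whatever was allocated
    }
}

// Typical caller:  bfd_map_over_sections (abfd, show_section, NULL);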
*/ bfd_arch_m68k, /* Motorola 68xxx */ #define bfd_mach_m68000 1 #define bfd_mach_m68008 2 #define bfd_mach_m68010 3 #define bfd_mach_m68020 4 #define bfd_mach_m68030 5 #define bfd_mach_m68040 6 #define bfd_mach_m68060 7 #define bfd_mach_cpu32 8 #define bfd_mach_mcf5200 9 #define bfd_mach_mcf5206e 10 #define bfd_mach_mcf5307 11 #define bfd_mach_mcf5407 12 #define bfd_mach_mcf528x 13 #define bfd_mach_mcfv4e 14 #define bfd_mach_mcf521x 15 #define bfd_mach_mcf5249 16 #define bfd_mach_mcf547x 17 #define bfd_mach_mcf548x 18 bfd_arch_vax, /* DEC Vax */ bfd_arch_i960, /* Intel 960 */ /* The order of the following is important. lower number indicates a machine type that only accepts a subset of the instructions available to machines with higher numbers. The exception is the "ca", which is incompatible with all other machines except "core". */ #define bfd_mach_i960_core 1 #define bfd_mach_i960_ka_sa 2 #define bfd_mach_i960_kb_sb 3 #define bfd_mach_i960_mc 4 #define bfd_mach_i960_xa 5 #define bfd_mach_i960_ca 6 #define bfd_mach_i960_jx 7 #define bfd_mach_i960_hx 8 bfd_arch_or32, /* OpenRISC 32 */ bfd_arch_a29k, /* AMD 29000 */ bfd_arch_sparc, /* SPARC */ #define bfd_mach_sparc 1 /* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ #define bfd_mach_sparc_sparclet 2 #define bfd_mach_sparc_sparclite 3 #define bfd_mach_sparc_v8plus 4 #define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ #define bfd_mach_sparc_sparclite_le 6 #define bfd_mach_sparc_v9 7 #define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ #define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ #define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ /* Nonzero if MACH has the v9 instruction set. */ #define bfd_mach_sparc_v9_p(mach) \ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ && (mach) != bfd_mach_sparc_sparclite_le) /* Nonzero if MACH is a 64 bit sparc architecture. 
*/ #define bfd_mach_sparc_64bit_p(mach) \ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb) bfd_arch_mips, /* MIPS Rxxxx */ #define bfd_mach_mips3000 3000 #define bfd_mach_mips3900 3900 #define bfd_mach_mips4000 4000 #define bfd_mach_mips4010 4010 #define bfd_mach_mips4100 4100 #define bfd_mach_mips4111 4111 #define bfd_mach_mips4120 4120 #define bfd_mach_mips4300 4300 #define bfd_mach_mips4400 4400 #define bfd_mach_mips4600 4600 #define bfd_mach_mips4650 4650 #define bfd_mach_mips5000 5000 #define bfd_mach_mips5400 5400 #define bfd_mach_mips5500 5500 #define bfd_mach_mips6000 6000 #define bfd_mach_mips7000 7000 #define bfd_mach_mips8000 8000 #define bfd_mach_mips9000 9000 #define bfd_mach_mips10000 10000 #define bfd_mach_mips12000 12000 #define bfd_mach_mips16 16 #define bfd_mach_mips5 5 #define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */ #define bfd_mach_mipsisa32 32 #define bfd_mach_mipsisa32r2 33 #define bfd_mach_mipsisa64 64 #define bfd_mach_mipsisa64r2 65 bfd_arch_i386, /* Intel 386 */ #define bfd_mach_i386_i386 1 #define bfd_mach_i386_i8086 2 #define bfd_mach_i386_i386_intel_syntax 3 #define bfd_mach_x86_64 64 #define bfd_mach_x86_64_intel_syntax 65 bfd_arch_we32k, /* AT&T WE32xxx */ bfd_arch_tahoe, /* CCI/Harris Tahoe */ bfd_arch_i860, /* Intel 860 */ bfd_arch_i370, /* IBM 360/370 Mainframes */ bfd_arch_romp, /* IBM ROMP PC/RT */ bfd_arch_alliant, /* Alliant */ bfd_arch_convex, /* Convex */ bfd_arch_m88k, /* Motorola 88xxx */ bfd_arch_m98k, /* Motorola 98xxx */ bfd_arch_pyramid, /* Pyramid Technology */ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */ #define bfd_mach_h8300 1 #define bfd_mach_h8300h 2 #define bfd_mach_h8300s 3 #define bfd_mach_h8300hn 4 #define bfd_mach_h8300sn 5 #define bfd_mach_h8300sx 6 #define bfd_mach_h8300sxn 7 bfd_arch_pdp11, /* DEC PDP-11 */ bfd_arch_powerpc, /* PowerPC */ #define bfd_mach_ppc 32 #define bfd_mach_ppc64 64 #define bfd_mach_ppc_403 403 #define bfd_mach_ppc_403gc 4030 #define bfd_mach_ppc_505 505 #define bfd_mach_ppc_601 601 #define bfd_mach_ppc_602 602 #define bfd_mach_ppc_603 603 #define bfd_mach_ppc_ec603e 6031 #define bfd_mach_ppc_604 604 #define bfd_mach_ppc_620 620 #define bfd_mach_ppc_630 630 #define bfd_mach_ppc_750 750 #define bfd_mach_ppc_860 860 #define bfd_mach_ppc_a35 35 #define bfd_mach_ppc_rs64ii 642 #define bfd_mach_ppc_rs64iii 643 #define bfd_mach_ppc_7400 7400 #define bfd_mach_ppc_e500 500 bfd_arch_rs6000, /* IBM RS/6000 */ #define bfd_mach_rs6k 6000 #define bfd_mach_rs6k_rs1 6001 #define bfd_mach_rs6k_rsc 6003 #define bfd_mach_rs6k_rs2 6002 bfd_arch_hppa, /* HP PA RISC */ #define bfd_mach_hppa10 10 #define bfd_mach_hppa11 11 #define bfd_mach_hppa20 20 #define bfd_mach_hppa20w 25 bfd_arch_d10v, /* Mitsubishi D10V */ #define bfd_mach_d10v 1 #define bfd_mach_d10v_ts2 2 #define bfd_mach_d10v_ts3 3 bfd_arch_d30v, /* Mitsubishi D30V */ bfd_arch_dlx, /* DLX */ bfd_arch_m68hc11, /* Motorola 68HC11 */ bfd_arch_m68hc12, /* Motorola 68HC12 */ #define bfd_mach_m6812_default 0 #define bfd_mach_m6812 1 #define bfd_mach_m6812s 2 bfd_arch_z8k, /* Zilog Z8000 */ #define bfd_mach_z8001 1 #define bfd_mach_z8002 2 bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */ #define bfd_mach_sh 1 #define bfd_mach_sh2 0x20 #define bfd_mach_sh_dsp 0x2d #define bfd_mach_sh2a 0x2a #define bfd_mach_sh2a_nofpu 0x2b #define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1 #define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2 #define bfd_mach_sh2a_or_sh4 0x2a3 #define 
bfd_mach_sh2a_or_sh3e 0x2a4 #define bfd_mach_sh2e 0x2e #define bfd_mach_sh3 0x30 #define bfd_mach_sh3_nommu 0x31 #define bfd_mach_sh3_dsp 0x3d #define bfd_mach_sh3e 0x3e #define bfd_mach_sh4 0x40 #define bfd_mach_sh4_nofpu 0x41 #define bfd_mach_sh4_nommu_nofpu 0x42 #define bfd_mach_sh4a 0x4a #define bfd_mach_sh4a_nofpu 0x4b #define bfd_mach_sh4al_dsp 0x4d #define bfd_mach_sh5 0x50 bfd_arch_alpha, /* Dec Alpha */ #define bfd_mach_alpha_ev4 0x10 #define bfd_mach_alpha_ev5 0x20 #define bfd_mach_alpha_ev6 0x30 bfd_arch_arm, /* Advanced Risc Machines ARM. */ #define bfd_mach_arm_unknown 0 #define bfd_mach_arm_2 1 #define bfd_mach_arm_2a 2 #define bfd_mach_arm_3 3 #define bfd_mach_arm_3M 4 #define bfd_mach_arm_4 5 #define bfd_mach_arm_4T 6 #define bfd_mach_arm_5 7 #define bfd_mach_arm_5T 8 #define bfd_mach_arm_5TE 9 #define bfd_mach_arm_XScale 10 #define bfd_mach_arm_ep9312 11 #define bfd_mach_arm_iWMMXt 12 bfd_arch_ns32k, /* National Semiconductors ns32000 */ bfd_arch_w65, /* WDC 65816 */ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */ #define bfd_mach_tic3x 30 #define bfd_mach_tic4x 40 bfd_arch_tic54x, /* Texas Instruments TMS320C54X */ bfd_arch_tic80, /* TI TMS320c80 (MVP) */ bfd_arch_v850, /* NEC V850 */ #define bfd_mach_v850 1 #define bfd_mach_v850e 'E' #define bfd_mach_v850e1 '1' bfd_arch_arc, /* ARC Cores */ #define bfd_mach_arc_5 5 #define bfd_mach_arc_6 6 #define bfd_mach_arc_7 7 #define bfd_mach_arc_8 8 bfd_arch_m32c, /* Renesas M16C/M32C. */ #define bfd_mach_m16c 0x75 #define bfd_mach_m32c 0x78 bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */ #define bfd_mach_m32r 1 /* For backwards compatibility. */ #define bfd_mach_m32rx 'x' #define bfd_mach_m32r2 '2' bfd_arch_mn10200, /* Matsushita MN10200 */ bfd_arch_mn10300, /* Matsushita MN10300 */ #define bfd_mach_mn10300 300 #define bfd_mach_am33 330 #define bfd_mach_am33_2 332 bfd_arch_fr30, #define bfd_mach_fr30 0x46523330 bfd_arch_frv, #define bfd_mach_frv 1 #define bfd_mach_frvsimple 2 #define bfd_mach_fr300 300 #define bfd_mach_fr400 400 #define bfd_mach_fr450 450 #define bfd_mach_frvtomcat 499 /* fr500 prototype */ #define bfd_mach_fr500 500 #define bfd_mach_fr550 550 bfd_arch_mcore, bfd_arch_ia64, /* HP/Intel ia64 */ #define bfd_mach_ia64_elf64 64 #define bfd_mach_ia64_elf32 32 bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */ #define bfd_mach_ip2022 1 #define bfd_mach_ip2022ext 2 bfd_arch_iq2000, /* Vitesse IQ2000. */ #define bfd_mach_iq2000 1 #define bfd_mach_iq10 2 bfd_arch_ms1, #define bfd_mach_ms1 1 #define bfd_mach_mrisc2 2 bfd_arch_pj, bfd_arch_avr, /* Atmel AVR microcontrollers. */ #define bfd_mach_avr1 1 #define bfd_mach_avr2 2 #define bfd_mach_avr3 3 #define bfd_mach_avr4 4 #define bfd_mach_avr5 5 bfd_arch_cr16c, /* National Semiconductor CompactRISC. */ #define bfd_mach_cr16c 1 bfd_arch_crx, /* National Semiconductor CRX. */ #define bfd_mach_crx 1 bfd_arch_cris, /* Axis CRIS */ #define bfd_mach_cris_v0_v10 255 #define bfd_mach_cris_v32 32 #define bfd_mach_cris_v10_v32 1032 bfd_arch_s390, /* IBM s390 */ #define bfd_mach_s390_31 31 #define bfd_mach_s390_64 64 bfd_arch_openrisc, /* OpenRISC */ bfd_arch_mmix, /* Donald Knuth's educational processor. */ bfd_arch_xstormy16, #define bfd_mach_xstormy16 1 bfd_arch_msp430, /* Texas Instruments MSP430 architecture. 
*/ #define bfd_mach_msp11 11 #define bfd_mach_msp110 110 #define bfd_mach_msp12 12 #define bfd_mach_msp13 13 #define bfd_mach_msp14 14 #define bfd_mach_msp15 15 #define bfd_mach_msp16 16 #define bfd_mach_msp31 31 #define bfd_mach_msp32 32 #define bfd_mach_msp33 33 #define bfd_mach_msp41 41 #define bfd_mach_msp42 42 #define bfd_mach_msp43 43 #define bfd_mach_msp44 44 bfd_arch_xtensa, /* Tensilica's Xtensa cores. */ #define bfd_mach_xtensa 1 bfd_arch_maxq, /* Dallas MAXQ 10/20 */ #define bfd_mach_maxq10 10 #define bfd_mach_maxq20 20 bfd_arch_last }; typedef struct bfd_arch_info { int bits_per_word; int bits_per_address; int bits_per_byte; enum bfd_architecture arch; unsigned long mach; const char *arch_name; const char *printable_name; unsigned int section_align_power; /* TRUE if this is the default machine for the architecture. The default arch should be the first entry for an arch so that all the entries for that arch can be accessed via <>. */ bfd_boolean the_default; const struct bfd_arch_info * (*compatible) (const struct bfd_arch_info *a, const struct bfd_arch_info *b); bfd_boolean (*scan) (const struct bfd_arch_info *, const char *); const struct bfd_arch_info *next; } bfd_arch_info_type; const char *bfd_printable_name (bfd *abfd); const bfd_arch_info_type *bfd_scan_arch (const char *string); const char **bfd_arch_list (void); const bfd_arch_info_type *bfd_arch_get_compatible (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns); void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg); enum bfd_architecture bfd_get_arch (bfd *abfd); unsigned long bfd_get_mach (bfd *abfd); unsigned int bfd_arch_bits_per_byte (bfd *abfd); unsigned int bfd_arch_bits_per_address (bfd *abfd); const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd); const bfd_arch_info_type *bfd_lookup_arch (enum bfd_architecture arch, unsigned long machine); const char *bfd_printable_arch_mach (enum bfd_architecture arch, unsigned long machine); unsigned int bfd_octets_per_byte (bfd *abfd); unsigned int bfd_arch_mach_octets_per_byte (enum bfd_architecture arch, unsigned long machine); /* Extracted from reloc.c. */ typedef enum bfd_reloc_status { /* No errors detected. */ bfd_reloc_ok, /* The relocation was performed, but there was an overflow. */ bfd_reloc_overflow, /* The address to relocate was not within the section supplied. */ bfd_reloc_outofrange, /* Used by special functions. */ bfd_reloc_continue, /* Unsupported relocation size requested. */ bfd_reloc_notsupported, /* Unused. */ bfd_reloc_other, /* The symbol to relocate against was undefined. */ bfd_reloc_undefined, /* The relocation was performed, but may not be ok - presently generated only when linking i960 coff files with i960 b.out symbols. If this type is returned, the error_message argument to bfd_perform_relocation will be set. */ bfd_reloc_dangerous } bfd_reloc_status_type; typedef struct reloc_cache_entry { /* A pointer into the canonical table of pointers. */ struct bfd_symbol **sym_ptr_ptr; /* offset in section. */ bfd_size_type address; /* addend for relocation value. */ bfd_vma addend; /* Pointer to how to perform the required relocation. */ reloc_howto_type *howto; } arelent; enum complain_overflow { /* Do not complain on overflow. */ complain_overflow_dont, /* Complain if the bitfield overflows, whether it is considered as signed or unsigned. */ complain_overflow_bitfield, /* Complain if the value overflows when considered as signed number. 
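The architecture query functions above let a client ask what an opened BFD was built for and map between the architecture/machine pair and its descriptive bfd_arch_info entry. Below is a brief illustrative sketch, not part of the original header, assuming an already opened object BFD and <stdio.h>.

// Sketch: report the architecture of an opened BFD and look up its
// arch-info record.
#include <stdio.h>

static void
report_arch (bfd *abfd)
{
  enum bfd_architecture arch = bfd_get_arch (abfd);
  unsigned long mach = bfd_get_mach (abfd);
  const bfd_arch_info_type *info = bfd_lookup_arch (arch, mach);

  printf ("architecture: %s (mach %lu)\n", bfd_printable_name (abfd), mach);
  if (info != NULL)
    printf ("  %d-bit addresses, %d bits per byte\n",
            info->bits_per_address, info->bits_per_byte);
}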
*/ complain_overflow_signed, /* Complain if the value overflows when considered as an unsigned number. */ complain_overflow_unsigned }; struct reloc_howto_struct { /* The type field has mainly a documentary use - the back end can do what it wants with it, though normally the back end's external idea of what a reloc number is stored in this field. For example, a PC relative word relocation in a coff environment has the type 023 - because that's what the outside world calls a R_PCRWORD reloc. */ unsigned int type; /* The value the final relocation is shifted right by. This drops unwanted data from the relocation. */ unsigned int rightshift; /* The size of the item to be relocated. This is *not* a power-of-two measure. To get the number of bytes operated on by a type of relocation, use bfd_get_reloc_size. */ int size; /* The number of bits in the item to be relocated. This is used when doing overflow checking. */ unsigned int bitsize; /* Notes that the relocation is relative to the location in the data section of the addend. The relocation function will subtract from the relocation value the address of the location being relocated. */ bfd_boolean pc_relative; /* The bit position of the reloc value in the destination. The relocated value is left shifted by this amount. */ unsigned int bitpos; /* What type of overflow error should be checked for when relocating. */ enum complain_overflow complain_on_overflow; /* If this field is non null, then the supplied function is called rather than the normal function. This allows really strange relocation methods to be accommodated (e.g., i960 callj instructions). */ bfd_reloc_status_type (*special_function) (bfd *, arelent *, struct bfd_symbol *, void *, asection *, bfd *, char **); /* The textual name of the relocation type. */ char *name; /* Some formats record a relocation addend in the section contents rather than with the relocation. For ELF formats this is the distinction between USE_REL and USE_RELA (though the code checks for USE_REL == 1/0). The value of this field is TRUE if the addend is recorded with the section contents; when performing a partial link (ld -r) the section contents (the data) will be modified. The value of this field is FALSE if addends are recorded with the relocation (in arelent.addend); when performing a partial link the relocation will be modified. All relocations for all ELF USE_RELA targets should set this field to FALSE (values of TRUE should be looked on with suspicion). However, the converse is not true: not all relocations of all ELF USE_REL targets set this field to TRUE. Why this is so is peculiar to each particular target. For relocs that aren't used in partial links (e.g. GOT stuff) it doesn't matter what this is set to. */ bfd_boolean partial_inplace; /* src_mask selects the part of the instruction (or data) to be used in the relocation sum. If the target relocations don't have an addend in the reloc, eg. ELF USE_REL, src_mask will normally equal dst_mask to extract the addend from the section contents. If relocations do have an addend in the reloc, eg. ELF USE_RELA, this field should be zero. Non-zero values for ELF USE_RELA targets are bogus as in those cases the value in the dst_mask part of the section contents should be treated as garbage. */ bfd_vma src_mask; /* dst_mask selects which parts of the instruction (or data) are replaced with a relocated value. 
*/ bfd_vma dst_mask; /* When some formats create PC relative instructions, they leave the value of the pc of the place being relocated in the offset slot of the instruction, so that a PC relative relocation can be made just by adding in an ordinary offset (e.g., sun3 a.out). Some formats leave the displacement part of an instruction empty (e.g., m88k bcs); this flag signals the fact. */ bfd_boolean pcrel_offset; }; #define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC } #define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \ NAME, FALSE, 0, 0, IN) #define EMPTY_HOWTO(C) \ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \ NULL, FALSE, 0, 0, FALSE) #define HOWTO_PREPARE(relocation, symbol) \ { \ if (symbol != NULL) \ { \ if (bfd_is_com_section (symbol->section)) \ { \ relocation = 0; \ } \ else \ { \ relocation = symbol->value; \ } \ } \ } unsigned int bfd_get_reloc_size (reloc_howto_type *); typedef struct relent_chain { arelent relent; struct relent_chain *next; } arelent_chain; bfd_reloc_status_type bfd_check_overflow (enum complain_overflow how, unsigned int bitsize, unsigned int rightshift, unsigned int addrsize, bfd_vma relocation); bfd_reloc_status_type bfd_perform_relocation (bfd *abfd, arelent *reloc_entry, void *data, asection *input_section, bfd *output_bfd, char **error_message); bfd_reloc_status_type bfd_install_relocation (bfd *abfd, arelent *reloc_entry, void *data, bfd_vma data_start, asection *input_section, char **error_message); enum bfd_reloc_code_real { _dummy_first_bfd_reloc_code_real, /* Basic absolute relocations of N bits. */ BFD_RELOC_64, BFD_RELOC_32, BFD_RELOC_26, BFD_RELOC_24, BFD_RELOC_16, BFD_RELOC_14, BFD_RELOC_8, /* PC-relative relocations. Sometimes these are relative to the address of the relocation itself; sometimes they are relative to the start of the section containing the relocation. It depends on the specific target. The 24-bit relocation is used in some Intel 960 configurations. */ BFD_RELOC_64_PCREL, BFD_RELOC_32_PCREL, BFD_RELOC_24_PCREL, BFD_RELOC_16_PCREL, BFD_RELOC_12_PCREL, BFD_RELOC_8_PCREL, /* Section relative relocations. Some targets need this for DWARF2. */ BFD_RELOC_32_SECREL, /* For ELF. */ BFD_RELOC_32_GOT_PCREL, BFD_RELOC_16_GOT_PCREL, BFD_RELOC_8_GOT_PCREL, BFD_RELOC_32_GOTOFF, BFD_RELOC_16_GOTOFF, BFD_RELOC_LO16_GOTOFF, BFD_RELOC_HI16_GOTOFF, BFD_RELOC_HI16_S_GOTOFF, BFD_RELOC_8_GOTOFF, BFD_RELOC_64_PLT_PCREL, BFD_RELOC_32_PLT_PCREL, BFD_RELOC_24_PLT_PCREL, BFD_RELOC_16_PLT_PCREL, BFD_RELOC_8_PLT_PCREL, BFD_RELOC_64_PLTOFF, BFD_RELOC_32_PLTOFF, BFD_RELOC_16_PLTOFF, BFD_RELOC_LO16_PLTOFF, BFD_RELOC_HI16_PLTOFF, BFD_RELOC_HI16_S_PLTOFF, BFD_RELOC_8_PLTOFF, /* Relocations used by 68K ELF. */ BFD_RELOC_68K_GLOB_DAT, BFD_RELOC_68K_JMP_SLOT, BFD_RELOC_68K_RELATIVE, /* Linkage-table relative. */ BFD_RELOC_32_BASEREL, BFD_RELOC_16_BASEREL, BFD_RELOC_LO16_BASEREL, BFD_RELOC_HI16_BASEREL, BFD_RELOC_HI16_S_BASEREL, BFD_RELOC_8_BASEREL, BFD_RELOC_RVA, /* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */ BFD_RELOC_8_FFnn, /* These PC-relative relocations are stored as word displacements -- i.e., byte displacements shifted right two bits. The 30-bit word displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the SPARC. (SPARC tools generally refer to this as <>.) 
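The HOWTO macro and bfd_check_overflow above are how back ends describe an individual relocation and test a value against its field width. The entry below is purely illustrative and not taken from any real target: the reloc number 99 and the name are hypothetical, and the size code 2 follows the convention that existing 32-bit howtos in the library use.

// Sketch (hypothetical target): a 32-bit absolute relocation described with
// the HOWTO macro, plus a standalone check against a signed 16-bit field.
static reloc_howto_type example_howto =
  HOWTO (99,                          // type: hypothetical reloc number
         0,                           // rightshift
         2,                           // size code used by 32-bit howtos
         32,                          // bitsize
         FALSE,                       // pc_relative
         0,                           // bitpos
         complain_overflow_bitfield,  // overflow checking style
         NULL,                        // no special_function
         "R_EXAMPLE_32",              // name (hypothetical)
         FALSE,                       // partial_inplace (addend kept in the reloc)
         0,                           // src_mask
         0xffffffff,                  // dst_mask
         FALSE);                      // pcrel_offset

// Would VALUE fit in a signed 16-bit field on a 32-bit target?
static bfd_boolean
fits_signed_16 (bfd_vma value)
{
  return bfd_check_overflow (complain_overflow_signed, 16, 0, 32, value)
         == bfd_reloc_ok;
}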
The signed 16-bit displacement is used on the MIPS, and the 23-bit displacement is used on the Alpha. */ BFD_RELOC_32_PCREL_S2, BFD_RELOC_16_PCREL_S2, BFD_RELOC_23_PCREL_S2, /* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of the target word. These are used on the SPARC. */ BFD_RELOC_HI22, BFD_RELOC_LO10, /* For systems that allocate a Global Pointer register, these are displacements off that register. These relocation types are handled specially, because the value the register will have is decided relatively late. */ BFD_RELOC_GPREL16, BFD_RELOC_GPREL32, /* Reloc types used for i960/b.out. */ BFD_RELOC_I960_CALLJ, /* SPARC ELF relocations. There is probably some overlap with other relocation types already defined. */ BFD_RELOC_NONE, BFD_RELOC_SPARC_WDISP22, BFD_RELOC_SPARC22, BFD_RELOC_SPARC13, BFD_RELOC_SPARC_GOT10, BFD_RELOC_SPARC_GOT13, BFD_RELOC_SPARC_GOT22, BFD_RELOC_SPARC_PC10, BFD_RELOC_SPARC_PC22, BFD_RELOC_SPARC_WPLT30, BFD_RELOC_SPARC_COPY, BFD_RELOC_SPARC_GLOB_DAT, BFD_RELOC_SPARC_JMP_SLOT, BFD_RELOC_SPARC_RELATIVE, BFD_RELOC_SPARC_UA16, BFD_RELOC_SPARC_UA32, BFD_RELOC_SPARC_UA64, /* I think these are specific to SPARC a.out (e.g., Sun 4). */ BFD_RELOC_SPARC_BASE13, BFD_RELOC_SPARC_BASE22, /* SPARC64 relocations */ #define BFD_RELOC_SPARC_64 BFD_RELOC_64 BFD_RELOC_SPARC_10, BFD_RELOC_SPARC_11, BFD_RELOC_SPARC_OLO10, BFD_RELOC_SPARC_HH22, BFD_RELOC_SPARC_HM10, BFD_RELOC_SPARC_LM22, BFD_RELOC_SPARC_PC_HH22, BFD_RELOC_SPARC_PC_HM10, BFD_RELOC_SPARC_PC_LM22, BFD_RELOC_SPARC_WDISP16, BFD_RELOC_SPARC_WDISP19, BFD_RELOC_SPARC_7, BFD_RELOC_SPARC_6, BFD_RELOC_SPARC_5, #define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL BFD_RELOC_SPARC_PLT32, BFD_RELOC_SPARC_PLT64, BFD_RELOC_SPARC_HIX22, BFD_RELOC_SPARC_LOX10, BFD_RELOC_SPARC_H44, BFD_RELOC_SPARC_M44, BFD_RELOC_SPARC_L44, BFD_RELOC_SPARC_REGISTER, /* SPARC little endian relocation */ BFD_RELOC_SPARC_REV32, /* SPARC TLS relocations */ BFD_RELOC_SPARC_TLS_GD_HI22, BFD_RELOC_SPARC_TLS_GD_LO10, BFD_RELOC_SPARC_TLS_GD_ADD, BFD_RELOC_SPARC_TLS_GD_CALL, BFD_RELOC_SPARC_TLS_LDM_HI22, BFD_RELOC_SPARC_TLS_LDM_LO10, BFD_RELOC_SPARC_TLS_LDM_ADD, BFD_RELOC_SPARC_TLS_LDM_CALL, BFD_RELOC_SPARC_TLS_LDO_HIX22, BFD_RELOC_SPARC_TLS_LDO_LOX10, BFD_RELOC_SPARC_TLS_LDO_ADD, BFD_RELOC_SPARC_TLS_IE_HI22, BFD_RELOC_SPARC_TLS_IE_LO10, BFD_RELOC_SPARC_TLS_IE_LD, BFD_RELOC_SPARC_TLS_IE_LDX, BFD_RELOC_SPARC_TLS_IE_ADD, BFD_RELOC_SPARC_TLS_LE_HIX22, BFD_RELOC_SPARC_TLS_LE_LOX10, BFD_RELOC_SPARC_TLS_DTPMOD32, BFD_RELOC_SPARC_TLS_DTPMOD64, BFD_RELOC_SPARC_TLS_DTPOFF32, BFD_RELOC_SPARC_TLS_DTPOFF64, BFD_RELOC_SPARC_TLS_TPOFF32, BFD_RELOC_SPARC_TLS_TPOFF64, /* Alpha ECOFF and ELF relocations. Some of these treat the symbol or "addend" in some special way. For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when writing; when reading, it will be the absolute section symbol. The addend is the displacement in bytes of the "lda" instruction from the "ldah" instruction (which is at the address of this reloc). */ BFD_RELOC_ALPHA_GPDISP_HI16, /* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as with GPDISP_HI16 relocs. The addend is ignored when writing the relocations out, and is filled in with the file's GP value on reading, for convenience. */ BFD_RELOC_ALPHA_GPDISP_LO16, /* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16 relocation except that there is no accompanying GPDISP_LO16 relocation. 
*/ BFD_RELOC_ALPHA_GPDISP, /* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference; the assembler turns it into a LDQ instruction to load the address of the symbol, and then fills in a register in the real instruction. The LITERAL reloc, at the LDQ instruction, refers to the .lita section symbol. The addend is ignored when writing, but is filled in with the file's GP value on reading, for convenience, as with the GPDISP_LO16 reloc. The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16. It should refer to the symbol to be referenced, as with 16_GOTOFF, but it generates output not based on the position within the .got section, but relative to the GP value chosen for the file during the final link stage. The LITUSE reloc, on the instruction using the loaded address, gives information to the linker that it might be able to use to optimize away some literal section references. The symbol is ignored (read as the absolute section symbol), and the "addend" indicates the type of instruction using the register: 1 - "memory" fmt insn 2 - byte-manipulation (byte offset reg) 3 - jsr (target of branch) */ BFD_RELOC_ALPHA_LITERAL, BFD_RELOC_ALPHA_ELF_LITERAL, BFD_RELOC_ALPHA_LITUSE, /* The HINT relocation indicates a value that should be filled into the "hint" field of a jmp/jsr/ret instruction, for possible branch- prediction logic which may be provided on some processors. */ BFD_RELOC_ALPHA_HINT, /* The LINKAGE relocation outputs a linkage pair in the object file, which is filled by the linker. */ BFD_RELOC_ALPHA_LINKAGE, /* The CODEADDR relocation outputs a STO_CA in the object file, which is filled by the linker. */ BFD_RELOC_ALPHA_CODEADDR, /* The GPREL_HI/LO relocations together form a 32-bit offset from the GP register. */ BFD_RELOC_ALPHA_GPREL_HI16, BFD_RELOC_ALPHA_GPREL_LO16, /* Like BFD_RELOC_23_PCREL_S2, except that the source and target must share a common GP, and the target address is adjusted for STO_ALPHA_STD_GPLOAD. */ BFD_RELOC_ALPHA_BRSGP, /* Alpha thread-local storage relocations. */ BFD_RELOC_ALPHA_TLSGD, BFD_RELOC_ALPHA_TLSLDM, BFD_RELOC_ALPHA_DTPMOD64, BFD_RELOC_ALPHA_GOTDTPREL16, BFD_RELOC_ALPHA_DTPREL64, BFD_RELOC_ALPHA_DTPREL_HI16, BFD_RELOC_ALPHA_DTPREL_LO16, BFD_RELOC_ALPHA_DTPREL16, BFD_RELOC_ALPHA_GOTTPREL16, BFD_RELOC_ALPHA_TPREL64, BFD_RELOC_ALPHA_TPREL_HI16, BFD_RELOC_ALPHA_TPREL_LO16, BFD_RELOC_ALPHA_TPREL16, /* Bits 27..2 of the relocation address shifted right 2 bits; simple reloc otherwise. */ BFD_RELOC_MIPS_JMP, /* The MIPS16 jump instruction. */ BFD_RELOC_MIPS16_JMP, /* MIPS16 GP relative reloc. */ BFD_RELOC_MIPS16_GPREL, /* High 16 bits of 32-bit value; simple reloc. */ BFD_RELOC_HI16, /* High 16 bits of 32-bit value but the low 16 bits will be sign extended and added to form the final result. If the low 16 bits form a negative number, we need to add one to the high value to compensate for the borrow when the low bits are added. */ BFD_RELOC_HI16_S, /* Low 16 bits. */ BFD_RELOC_LO16, /* High 16 bits of 32-bit pc-relative value */ BFD_RELOC_HI16_PCREL, /* High 16 bits of 32-bit pc-relative value, adjusted */ BFD_RELOC_HI16_S_PCREL, /* Low 16 bits of pc-relative value */ BFD_RELOC_LO16_PCREL, /* MIPS16 high 16 bits of 32-bit value. */ BFD_RELOC_MIPS16_HI16, /* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign extended and added to form the final result. If the low 16 bits form a negative number, we need to add one to the high value to compensate for the borrow when the low bits are added. 
*/ BFD_RELOC_MIPS16_HI16_S, /* MIPS16 low 16 bits. */ BFD_RELOC_MIPS16_LO16, /* Relocation against a MIPS literal section. */ BFD_RELOC_MIPS_LITERAL, /* MIPS ELF relocations. */ BFD_RELOC_MIPS_GOT16, BFD_RELOC_MIPS_CALL16, BFD_RELOC_MIPS_GOT_HI16, BFD_RELOC_MIPS_GOT_LO16, BFD_RELOC_MIPS_CALL_HI16, BFD_RELOC_MIPS_CALL_LO16, BFD_RELOC_MIPS_SUB, BFD_RELOC_MIPS_GOT_PAGE, BFD_RELOC_MIPS_GOT_OFST, BFD_RELOC_MIPS_GOT_DISP, BFD_RELOC_MIPS_SHIFT5, BFD_RELOC_MIPS_SHIFT6, BFD_RELOC_MIPS_INSERT_A, BFD_RELOC_MIPS_INSERT_B, BFD_RELOC_MIPS_DELETE, BFD_RELOC_MIPS_HIGHEST, BFD_RELOC_MIPS_HIGHER, BFD_RELOC_MIPS_SCN_DISP, BFD_RELOC_MIPS_REL16, BFD_RELOC_MIPS_RELGOT, BFD_RELOC_MIPS_JALR, BFD_RELOC_MIPS_TLS_DTPMOD32, BFD_RELOC_MIPS_TLS_DTPREL32, BFD_RELOC_MIPS_TLS_DTPMOD64, BFD_RELOC_MIPS_TLS_DTPREL64, BFD_RELOC_MIPS_TLS_GD, BFD_RELOC_MIPS_TLS_LDM, BFD_RELOC_MIPS_TLS_DTPREL_HI16, BFD_RELOC_MIPS_TLS_DTPREL_LO16, BFD_RELOC_MIPS_TLS_GOTTPREL, BFD_RELOC_MIPS_TLS_TPREL32, BFD_RELOC_MIPS_TLS_TPREL64, BFD_RELOC_MIPS_TLS_TPREL_HI16, BFD_RELOC_MIPS_TLS_TPREL_LO16, /* Fujitsu Frv Relocations. */ BFD_RELOC_FRV_LABEL16, BFD_RELOC_FRV_LABEL24, BFD_RELOC_FRV_LO16, BFD_RELOC_FRV_HI16, BFD_RELOC_FRV_GPREL12, BFD_RELOC_FRV_GPRELU12, BFD_RELOC_FRV_GPREL32, BFD_RELOC_FRV_GPRELHI, BFD_RELOC_FRV_GPRELLO, BFD_RELOC_FRV_GOT12, BFD_RELOC_FRV_GOTHI, BFD_RELOC_FRV_GOTLO, BFD_RELOC_FRV_FUNCDESC, BFD_RELOC_FRV_FUNCDESC_GOT12, BFD_RELOC_FRV_FUNCDESC_GOTHI, BFD_RELOC_FRV_FUNCDESC_GOTLO, BFD_RELOC_FRV_FUNCDESC_VALUE, BFD_RELOC_FRV_FUNCDESC_GOTOFF12, BFD_RELOC_FRV_FUNCDESC_GOTOFFHI, BFD_RELOC_FRV_FUNCDESC_GOTOFFLO, BFD_RELOC_FRV_GOTOFF12, BFD_RELOC_FRV_GOTOFFHI, BFD_RELOC_FRV_GOTOFFLO, BFD_RELOC_FRV_GETTLSOFF, BFD_RELOC_FRV_TLSDESC_VALUE, BFD_RELOC_FRV_GOTTLSDESC12, BFD_RELOC_FRV_GOTTLSDESCHI, BFD_RELOC_FRV_GOTTLSDESCLO, BFD_RELOC_FRV_TLSMOFF12, BFD_RELOC_FRV_TLSMOFFHI, BFD_RELOC_FRV_TLSMOFFLO, BFD_RELOC_FRV_GOTTLSOFF12, BFD_RELOC_FRV_GOTTLSOFFHI, BFD_RELOC_FRV_GOTTLSOFFLO, BFD_RELOC_FRV_TLSOFF, BFD_RELOC_FRV_TLSDESC_RELAX, BFD_RELOC_FRV_GETTLSOFF_RELAX, BFD_RELOC_FRV_TLSOFF_RELAX, BFD_RELOC_FRV_TLSMOFF, /* This is a 24bit GOT-relative reloc for the mn10300. */ BFD_RELOC_MN10300_GOTOFF24, /* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT32, /* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT24, /* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT16, /* Copy symbol at runtime. */ BFD_RELOC_MN10300_COPY, /* Create GOT entry. */ BFD_RELOC_MN10300_GLOB_DAT, /* Create PLT entry. */ BFD_RELOC_MN10300_JMP_SLOT, /* Adjust by program base. 
*/ BFD_RELOC_MN10300_RELATIVE, /* i386/elf relocations */ BFD_RELOC_386_GOT32, BFD_RELOC_386_PLT32, BFD_RELOC_386_COPY, BFD_RELOC_386_GLOB_DAT, BFD_RELOC_386_JUMP_SLOT, BFD_RELOC_386_RELATIVE, BFD_RELOC_386_GOTOFF, BFD_RELOC_386_GOTPC, BFD_RELOC_386_TLS_TPOFF, BFD_RELOC_386_TLS_IE, BFD_RELOC_386_TLS_GOTIE, BFD_RELOC_386_TLS_LE, BFD_RELOC_386_TLS_GD, BFD_RELOC_386_TLS_LDM, BFD_RELOC_386_TLS_LDO_32, BFD_RELOC_386_TLS_IE_32, BFD_RELOC_386_TLS_LE_32, BFD_RELOC_386_TLS_DTPMOD32, BFD_RELOC_386_TLS_DTPOFF32, BFD_RELOC_386_TLS_TPOFF32, /* x86-64/elf relocations */ BFD_RELOC_X86_64_GOT32, BFD_RELOC_X86_64_PLT32, BFD_RELOC_X86_64_COPY, BFD_RELOC_X86_64_GLOB_DAT, BFD_RELOC_X86_64_JUMP_SLOT, BFD_RELOC_X86_64_RELATIVE, BFD_RELOC_X86_64_GOTPCREL, BFD_RELOC_X86_64_32S, BFD_RELOC_X86_64_DTPMOD64, BFD_RELOC_X86_64_DTPOFF64, BFD_RELOC_X86_64_TPOFF64, BFD_RELOC_X86_64_TLSGD, BFD_RELOC_X86_64_TLSLD, BFD_RELOC_X86_64_DTPOFF32, BFD_RELOC_X86_64_GOTTPOFF, BFD_RELOC_X86_64_TPOFF32, BFD_RELOC_X86_64_GOTOFF64, BFD_RELOC_X86_64_GOTPC32, /* ns32k relocations */ BFD_RELOC_NS32K_IMM_8, BFD_RELOC_NS32K_IMM_16, BFD_RELOC_NS32K_IMM_32, BFD_RELOC_NS32K_IMM_8_PCREL, BFD_RELOC_NS32K_IMM_16_PCREL, BFD_RELOC_NS32K_IMM_32_PCREL, BFD_RELOC_NS32K_DISP_8, BFD_RELOC_NS32K_DISP_16, BFD_RELOC_NS32K_DISP_32, BFD_RELOC_NS32K_DISP_8_PCREL, BFD_RELOC_NS32K_DISP_16_PCREL, BFD_RELOC_NS32K_DISP_32_PCREL, /* PDP11 relocations */ BFD_RELOC_PDP11_DISP_8_PCREL, BFD_RELOC_PDP11_DISP_6_PCREL, /* Picojava relocs. Not all of these appear in object files. */ BFD_RELOC_PJ_CODE_HI16, BFD_RELOC_PJ_CODE_LO16, BFD_RELOC_PJ_CODE_DIR16, BFD_RELOC_PJ_CODE_DIR32, BFD_RELOC_PJ_CODE_REL16, BFD_RELOC_PJ_CODE_REL32, /* Power(rs6000) and PowerPC relocations. */ BFD_RELOC_PPC_B26, BFD_RELOC_PPC_BA26, BFD_RELOC_PPC_TOC16, BFD_RELOC_PPC_B16, BFD_RELOC_PPC_B16_BRTAKEN, BFD_RELOC_PPC_B16_BRNTAKEN, BFD_RELOC_PPC_BA16, BFD_RELOC_PPC_BA16_BRTAKEN, BFD_RELOC_PPC_BA16_BRNTAKEN, BFD_RELOC_PPC_COPY, BFD_RELOC_PPC_GLOB_DAT, BFD_RELOC_PPC_JMP_SLOT, BFD_RELOC_PPC_RELATIVE, BFD_RELOC_PPC_LOCAL24PC, BFD_RELOC_PPC_EMB_NADDR32, BFD_RELOC_PPC_EMB_NADDR16, BFD_RELOC_PPC_EMB_NADDR16_LO, BFD_RELOC_PPC_EMB_NADDR16_HI, BFD_RELOC_PPC_EMB_NADDR16_HA, BFD_RELOC_PPC_EMB_SDAI16, BFD_RELOC_PPC_EMB_SDA2I16, BFD_RELOC_PPC_EMB_SDA2REL, BFD_RELOC_PPC_EMB_SDA21, BFD_RELOC_PPC_EMB_MRKREF, BFD_RELOC_PPC_EMB_RELSEC16, BFD_RELOC_PPC_EMB_RELST_LO, BFD_RELOC_PPC_EMB_RELST_HI, BFD_RELOC_PPC_EMB_RELST_HA, BFD_RELOC_PPC_EMB_BIT_FLD, BFD_RELOC_PPC_EMB_RELSDA, BFD_RELOC_PPC64_HIGHER, BFD_RELOC_PPC64_HIGHER_S, BFD_RELOC_PPC64_HIGHEST, BFD_RELOC_PPC64_HIGHEST_S, BFD_RELOC_PPC64_TOC16_LO, BFD_RELOC_PPC64_TOC16_HI, BFD_RELOC_PPC64_TOC16_HA, BFD_RELOC_PPC64_TOC, BFD_RELOC_PPC64_PLTGOT16, BFD_RELOC_PPC64_PLTGOT16_LO, BFD_RELOC_PPC64_PLTGOT16_HI, BFD_RELOC_PPC64_PLTGOT16_HA, BFD_RELOC_PPC64_ADDR16_DS, BFD_RELOC_PPC64_ADDR16_LO_DS, BFD_RELOC_PPC64_GOT16_DS, BFD_RELOC_PPC64_GOT16_LO_DS, BFD_RELOC_PPC64_PLT16_LO_DS, BFD_RELOC_PPC64_SECTOFF_DS, BFD_RELOC_PPC64_SECTOFF_LO_DS, BFD_RELOC_PPC64_TOC16_DS, BFD_RELOC_PPC64_TOC16_LO_DS, BFD_RELOC_PPC64_PLTGOT16_DS, BFD_RELOC_PPC64_PLTGOT16_LO_DS, /* PowerPC and PowerPC64 thread-local storage relocations. 
*/ BFD_RELOC_PPC_TLS, BFD_RELOC_PPC_DTPMOD, BFD_RELOC_PPC_TPREL16, BFD_RELOC_PPC_TPREL16_LO, BFD_RELOC_PPC_TPREL16_HI, BFD_RELOC_PPC_TPREL16_HA, BFD_RELOC_PPC_TPREL, BFD_RELOC_PPC_DTPREL16, BFD_RELOC_PPC_DTPREL16_LO, BFD_RELOC_PPC_DTPREL16_HI, BFD_RELOC_PPC_DTPREL16_HA, BFD_RELOC_PPC_DTPREL, BFD_RELOC_PPC_GOT_TLSGD16, BFD_RELOC_PPC_GOT_TLSGD16_LO, BFD_RELOC_PPC_GOT_TLSGD16_HI, BFD_RELOC_PPC_GOT_TLSGD16_HA, BFD_RELOC_PPC_GOT_TLSLD16, BFD_RELOC_PPC_GOT_TLSLD16_LO, BFD_RELOC_PPC_GOT_TLSLD16_HI, BFD_RELOC_PPC_GOT_TLSLD16_HA, BFD_RELOC_PPC_GOT_TPREL16, BFD_RELOC_PPC_GOT_TPREL16_LO, BFD_RELOC_PPC_GOT_TPREL16_HI, BFD_RELOC_PPC_GOT_TPREL16_HA, BFD_RELOC_PPC_GOT_DTPREL16, BFD_RELOC_PPC_GOT_DTPREL16_LO, BFD_RELOC_PPC_GOT_DTPREL16_HI, BFD_RELOC_PPC_GOT_DTPREL16_HA, BFD_RELOC_PPC64_TPREL16_DS, BFD_RELOC_PPC64_TPREL16_LO_DS, BFD_RELOC_PPC64_TPREL16_HIGHER, BFD_RELOC_PPC64_TPREL16_HIGHERA, BFD_RELOC_PPC64_TPREL16_HIGHEST, BFD_RELOC_PPC64_TPREL16_HIGHESTA, BFD_RELOC_PPC64_DTPREL16_DS, BFD_RELOC_PPC64_DTPREL16_LO_DS, BFD_RELOC_PPC64_DTPREL16_HIGHER, BFD_RELOC_PPC64_DTPREL16_HIGHERA, BFD_RELOC_PPC64_DTPREL16_HIGHEST, BFD_RELOC_PPC64_DTPREL16_HIGHESTA, /* IBM 370/390 relocations */ BFD_RELOC_I370_D12, /* The type of reloc used to build a constructor table - at the moment probably a 32 bit wide absolute relocation, but the target can choose. It generally does map to one of the other relocation types. */ BFD_RELOC_CTOR, /* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are not stored in the instruction. */ BFD_RELOC_ARM_PCREL_BRANCH, /* ARM 26 bit pc-relative branch. The lowest bit must be zero and is not stored in the instruction. The 2nd lowest bit comes from a 1 bit field in the instruction. */ BFD_RELOC_ARM_PCREL_BLX, /* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is not stored in the instruction. The 2nd lowest bit comes from a 1 bit field in the instruction. */ BFD_RELOC_THUMB_PCREL_BLX, /* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches. The lowest bit must be zero and is not stored in the instruction. Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an "nn" one smaller in all cases. Note further that BRANCH23 corresponds to R_ARM_THM_CALL. */ BFD_RELOC_THUMB_PCREL_BRANCH7, BFD_RELOC_THUMB_PCREL_BRANCH9, BFD_RELOC_THUMB_PCREL_BRANCH12, BFD_RELOC_THUMB_PCREL_BRANCH20, BFD_RELOC_THUMB_PCREL_BRANCH23, BFD_RELOC_THUMB_PCREL_BRANCH25, /* 12-bit immediate offset, used in ARM-format ldr and str instructions. */ BFD_RELOC_ARM_OFFSET_IMM, /* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */ BFD_RELOC_ARM_THUMB_OFFSET, /* Pc-relative or absolute relocation depending on target. Used for entries in .init_array sections. */ BFD_RELOC_ARM_TARGET1, /* Read-only segment base relative address. */ BFD_RELOC_ARM_ROSEGREL32, /* Data segment base relative address. */ BFD_RELOC_ARM_SBREL32, /* This reloc is used for references to RTTI data from exception handling tables. The actual definition depends on the target. It may be a pc-relative or some form of GOT-indirect relocation. */ BFD_RELOC_ARM_TARGET2, /* 31-bit PC relative address. */ BFD_RELOC_ARM_PREL31, /* Relocations for setting up GOTs and PLTs for shared libraries. */ BFD_RELOC_ARM_JUMP_SLOT, BFD_RELOC_ARM_GLOB_DAT, BFD_RELOC_ARM_GOT32, BFD_RELOC_ARM_PLT32, BFD_RELOC_ARM_RELATIVE, BFD_RELOC_ARM_GOTOFF, BFD_RELOC_ARM_GOTPC, /* ARM thread-local storage relocations. 
*/ BFD_RELOC_ARM_TLS_GD32, BFD_RELOC_ARM_TLS_LDO32, BFD_RELOC_ARM_TLS_LDM32, BFD_RELOC_ARM_TLS_DTPOFF32, BFD_RELOC_ARM_TLS_DTPMOD32, BFD_RELOC_ARM_TLS_TPOFF32, BFD_RELOC_ARM_TLS_IE32, BFD_RELOC_ARM_TLS_LE32, /* These relocs are only used within the ARM assembler. They are not (at present) written to any object files. */ BFD_RELOC_ARM_IMMEDIATE, BFD_RELOC_ARM_ADRL_IMMEDIATE, BFD_RELOC_ARM_T32_IMMEDIATE, BFD_RELOC_ARM_SHIFT_IMM, BFD_RELOC_ARM_SMI, BFD_RELOC_ARM_SWI, BFD_RELOC_ARM_MULTI, BFD_RELOC_ARM_CP_OFF_IMM, BFD_RELOC_ARM_CP_OFF_IMM_S2, BFD_RELOC_ARM_ADR_IMM, BFD_RELOC_ARM_LDR_IMM, BFD_RELOC_ARM_LITERAL, BFD_RELOC_ARM_IN_POOL, BFD_RELOC_ARM_OFFSET_IMM8, BFD_RELOC_ARM_T32_OFFSET_U8, BFD_RELOC_ARM_T32_OFFSET_IMM, BFD_RELOC_ARM_HWLITERAL, BFD_RELOC_ARM_THUMB_ADD, BFD_RELOC_ARM_THUMB_IMM, BFD_RELOC_ARM_THUMB_SHIFT, /* Renesas / SuperH SH relocs. Not all of these appear in object files. */ BFD_RELOC_SH_PCDISP8BY2, BFD_RELOC_SH_PCDISP12BY2, BFD_RELOC_SH_IMM3, BFD_RELOC_SH_IMM3U, BFD_RELOC_SH_DISP12, BFD_RELOC_SH_DISP12BY2, BFD_RELOC_SH_DISP12BY4, BFD_RELOC_SH_DISP12BY8, BFD_RELOC_SH_DISP20, BFD_RELOC_SH_DISP20BY8, BFD_RELOC_SH_IMM4, BFD_RELOC_SH_IMM4BY2, BFD_RELOC_SH_IMM4BY4, BFD_RELOC_SH_IMM8, BFD_RELOC_SH_IMM8BY2, BFD_RELOC_SH_IMM8BY4, BFD_RELOC_SH_PCRELIMM8BY2, BFD_RELOC_SH_PCRELIMM8BY4, BFD_RELOC_SH_SWITCH16, BFD_RELOC_SH_SWITCH32, BFD_RELOC_SH_USES, BFD_RELOC_SH_COUNT, BFD_RELOC_SH_ALIGN, BFD_RELOC_SH_CODE, BFD_RELOC_SH_DATA, BFD_RELOC_SH_LABEL, BFD_RELOC_SH_LOOP_START, BFD_RELOC_SH_LOOP_END, BFD_RELOC_SH_COPY, BFD_RELOC_SH_GLOB_DAT, BFD_RELOC_SH_JMP_SLOT, BFD_RELOC_SH_RELATIVE, BFD_RELOC_SH_GOTPC, BFD_RELOC_SH_GOT_LOW16, BFD_RELOC_SH_GOT_MEDLOW16, BFD_RELOC_SH_GOT_MEDHI16, BFD_RELOC_SH_GOT_HI16, BFD_RELOC_SH_GOTPLT_LOW16, BFD_RELOC_SH_GOTPLT_MEDLOW16, BFD_RELOC_SH_GOTPLT_MEDHI16, BFD_RELOC_SH_GOTPLT_HI16, BFD_RELOC_SH_PLT_LOW16, BFD_RELOC_SH_PLT_MEDLOW16, BFD_RELOC_SH_PLT_MEDHI16, BFD_RELOC_SH_PLT_HI16, BFD_RELOC_SH_GOTOFF_LOW16, BFD_RELOC_SH_GOTOFF_MEDLOW16, BFD_RELOC_SH_GOTOFF_MEDHI16, BFD_RELOC_SH_GOTOFF_HI16, BFD_RELOC_SH_GOTPC_LOW16, BFD_RELOC_SH_GOTPC_MEDLOW16, BFD_RELOC_SH_GOTPC_MEDHI16, BFD_RELOC_SH_GOTPC_HI16, BFD_RELOC_SH_COPY64, BFD_RELOC_SH_GLOB_DAT64, BFD_RELOC_SH_JMP_SLOT64, BFD_RELOC_SH_RELATIVE64, BFD_RELOC_SH_GOT10BY4, BFD_RELOC_SH_GOT10BY8, BFD_RELOC_SH_GOTPLT10BY4, BFD_RELOC_SH_GOTPLT10BY8, BFD_RELOC_SH_GOTPLT32, BFD_RELOC_SH_SHMEDIA_CODE, BFD_RELOC_SH_IMMU5, BFD_RELOC_SH_IMMS6, BFD_RELOC_SH_IMMS6BY32, BFD_RELOC_SH_IMMU6, BFD_RELOC_SH_IMMS10, BFD_RELOC_SH_IMMS10BY2, BFD_RELOC_SH_IMMS10BY4, BFD_RELOC_SH_IMMS10BY8, BFD_RELOC_SH_IMMS16, BFD_RELOC_SH_IMMU16, BFD_RELOC_SH_IMM_LOW16, BFD_RELOC_SH_IMM_LOW16_PCREL, BFD_RELOC_SH_IMM_MEDLOW16, BFD_RELOC_SH_IMM_MEDLOW16_PCREL, BFD_RELOC_SH_IMM_MEDHI16, BFD_RELOC_SH_IMM_MEDHI16_PCREL, BFD_RELOC_SH_IMM_HI16, BFD_RELOC_SH_IMM_HI16_PCREL, BFD_RELOC_SH_PT_16, BFD_RELOC_SH_TLS_GD_32, BFD_RELOC_SH_TLS_LD_32, BFD_RELOC_SH_TLS_LDO_32, BFD_RELOC_SH_TLS_IE_32, BFD_RELOC_SH_TLS_LE_32, BFD_RELOC_SH_TLS_DTPMOD32, BFD_RELOC_SH_TLS_DTPOFF32, BFD_RELOC_SH_TLS_TPOFF32, /* ARC Cores relocs. ARC 22 bit pc-relative branch. The lowest two bits must be zero and are not stored in the instruction. The high 20 bits are installed in bits 26 through 7 of the instruction. */ BFD_RELOC_ARC_B22_PCREL, /* ARC 26 bit absolute branch. The lowest two bits must be zero and are not stored in the instruction. The high 24 bits are installed in bits 23 through 0. */ BFD_RELOC_ARC_B26, /* Mitsubishi D10V relocs. 
This is a 10-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_10_PCREL_R, /* Mitsubishi D10V relocs. This is a 10-bit reloc with the right 2 bits assumed to be 0. This is the same as the previous reloc except it is in the left container, i.e., shifted left 15 bits. */ BFD_RELOC_D10V_10_PCREL_L, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_18, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_18_PCREL, /* Mitsubishi D30V relocs. This is a 6-bit absolute reloc. */ BFD_RELOC_D30V_6, /* This is a 6-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_9_PCREL, /* This is a 6-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_9_PCREL_R, /* This is a 12-bit absolute reloc with the right 3 bitsassumed to be 0. */ BFD_RELOC_D30V_15, /* This is a 12-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_15_PCREL, /* This is a 12-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_15_PCREL_R, /* This is an 18-bit absolute reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_21, /* This is an 18-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_21_PCREL, /* This is an 18-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_21_PCREL_R, /* This is a 32-bit absolute reloc. */ BFD_RELOC_D30V_32, /* This is a 32-bit pc-relative reloc. */ BFD_RELOC_D30V_32_PCREL, /* DLX relocs */ BFD_RELOC_DLX_HI16_S, /* DLX relocs */ BFD_RELOC_DLX_LO16, /* DLX relocs */ BFD_RELOC_DLX_JMP26, /* Renesas M16C/M32C Relocations. */ BFD_RELOC_M16C_8_PCREL8, BFD_RELOC_M16C_16_PCREL8, BFD_RELOC_M16C_8_PCREL16, BFD_RELOC_M16C_8_ELABEL24, BFD_RELOC_M16C_8_ABS16, BFD_RELOC_M16C_16_ABS16, BFD_RELOC_M16C_16_ABS24, BFD_RELOC_M16C_16_ABS32, BFD_RELOC_M16C_24_ABS16, BFD_RELOC_M16C_24_ABS24, BFD_RELOC_M16C_24_ABS32, BFD_RELOC_M16C_32_ABS16, BFD_RELOC_M16C_32_ABS24, BFD_RELOC_M16C_32_ABS32, BFD_RELOC_M16C_40_ABS16, BFD_RELOC_M16C_40_ABS24, BFD_RELOC_M16C_40_ABS32, /* Renesas M32R (formerly Mitsubishi M32R) relocs. This is a 24 bit absolute address. */ BFD_RELOC_M32R_24, /* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_10_PCREL, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_18_PCREL, /* This is a 26-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_26_PCREL, /* This is a 16-bit reloc containing the high 16 bits of an address used when the lower 16 bits are treated as unsigned. */ BFD_RELOC_M32R_HI16_ULO, /* This is a 16-bit reloc containing the high 16 bits of an address used when the lower 16 bits are treated as signed. */ BFD_RELOC_M32R_HI16_SLO, /* This is a 16-bit reloc containing the lower 16 bits of an address. */ BFD_RELOC_M32R_LO16, /* This is a 16-bit reloc containing the small data area offset for use in add3, load, and store instructions. */ BFD_RELOC_M32R_SDA16, /* For PIC. 
*/ BFD_RELOC_M32R_GOT24, BFD_RELOC_M32R_26_PLTREL, BFD_RELOC_M32R_COPY, BFD_RELOC_M32R_GLOB_DAT, BFD_RELOC_M32R_JMP_SLOT, BFD_RELOC_M32R_RELATIVE, BFD_RELOC_M32R_GOTOFF, BFD_RELOC_M32R_GOTOFF_HI_ULO, BFD_RELOC_M32R_GOTOFF_HI_SLO, BFD_RELOC_M32R_GOTOFF_LO, BFD_RELOC_M32R_GOTPC24, BFD_RELOC_M32R_GOT16_HI_ULO, BFD_RELOC_M32R_GOT16_HI_SLO, BFD_RELOC_M32R_GOT16_LO, BFD_RELOC_M32R_GOTPC_HI_ULO, BFD_RELOC_M32R_GOTPC_HI_SLO, BFD_RELOC_M32R_GOTPC_LO, /* This is a 9-bit reloc */ BFD_RELOC_V850_9_PCREL, /* This is a 22-bit reloc */ BFD_RELOC_V850_22_PCREL, /* This is a 16 bit offset from the short data area pointer. */ BFD_RELOC_V850_SDA_16_16_OFFSET, /* This is a 16 bit offset (of which only 15 bits are used) from the short data area pointer. */ BFD_RELOC_V850_SDA_15_16_OFFSET, /* This is a 16 bit offset from the zero data area pointer. */ BFD_RELOC_V850_ZDA_16_16_OFFSET, /* This is a 16 bit offset (of which only 15 bits are used) from the zero data area pointer. */ BFD_RELOC_V850_ZDA_15_16_OFFSET, /* This is an 8 bit offset (of which only 6 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_6_8_OFFSET, /* This is an 8bit offset (of which only 7 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_7_8_OFFSET, /* This is a 7 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_7_7_OFFSET, /* This is a 16 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_16_16_OFFSET, /* This is a 5 bit offset (of which only 4 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_4_5_OFFSET, /* This is a 4 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_4_4_OFFSET, /* This is a 16 bit offset from the short data area pointer, with the bits placed non-contiguously in the instruction. */ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET, /* This is a 16 bit offset from the zero data area pointer, with the bits placed non-contiguously in the instruction. */ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET, /* This is a 6 bit offset from the call table base pointer. */ BFD_RELOC_V850_CALLT_6_7_OFFSET, /* This is a 16 bit offset from the call table base pointer. */ BFD_RELOC_V850_CALLT_16_16_OFFSET, /* Used for relaxing indirect function calls. */ BFD_RELOC_V850_LONGCALL, /* Used for relaxing indirect jumps. */ BFD_RELOC_V850_LONGJUMP, /* Used to maintain alignment whilst relaxing. */ BFD_RELOC_V850_ALIGN, /* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu instructions. */ BFD_RELOC_V850_LO16_SPLIT_OFFSET, /* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_32_PCREL, /* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_16_PCREL, /* This is a 8bit DP reloc for the tms320c30, where the most significant 8 bits of a 24 bit word are placed into the least significant 8 bits of the opcode. */ BFD_RELOC_TIC30_LDP, /* This is a 7bit reloc for the tms320c54x, where the least significant 7 bits of a 16 bit word are placed into the least significant 7 bits of the opcode. */ BFD_RELOC_TIC54X_PARTLS7, /* This is a 9bit DP reloc for the tms320c54x, where the most significant 9 bits of a 16 bit word are placed into the least significant 9 bits of the opcode. */ BFD_RELOC_TIC54X_PARTMS9, /* This is an extended address 23-bit reloc for the tms320c54x. */ BFD_RELOC_TIC54X_23, /* This is a 16-bit reloc for the tms320c54x, where the least significant 16 bits of a 23-bit extended address are placed into the opcode. 
*/ BFD_RELOC_TIC54X_16_OF_23, /* This is a reloc for the tms320c54x, where the most significant 7 bits of a 23-bit extended address are placed into the opcode. */ BFD_RELOC_TIC54X_MS7_OF_23, /* This is a 48 bit reloc for the FR30 that stores 32 bits. */ BFD_RELOC_FR30_48, /* This is a 32 bit reloc for the FR30 that stores 20 bits split up into two sections. */ BFD_RELOC_FR30_20, /* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in 4 bits. */ BFD_RELOC_FR30_6_IN_4, /* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset into 8 bits. */ BFD_RELOC_FR30_8_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset into 8 bits. */ BFD_RELOC_FR30_9_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset into 8 bits. */ BFD_RELOC_FR30_10_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative short offset into 8 bits. */ BFD_RELOC_FR30_9_PCREL, /* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative short offset into 11 bits. */ BFD_RELOC_FR30_12_PCREL, /* Motorola Mcore relocations. */ BFD_RELOC_MCORE_PCREL_IMM8BY4, BFD_RELOC_MCORE_PCREL_IMM11BY2, BFD_RELOC_MCORE_PCREL_IMM4BY2, BFD_RELOC_MCORE_PCREL_32, BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2, BFD_RELOC_MCORE_RVA, /* These are relocations for the GETA instruction. */ BFD_RELOC_MMIX_GETA, BFD_RELOC_MMIX_GETA_1, BFD_RELOC_MMIX_GETA_2, BFD_RELOC_MMIX_GETA_3, /* These are relocations for a conditional branch instruction. */ BFD_RELOC_MMIX_CBRANCH, BFD_RELOC_MMIX_CBRANCH_J, BFD_RELOC_MMIX_CBRANCH_1, BFD_RELOC_MMIX_CBRANCH_2, BFD_RELOC_MMIX_CBRANCH_3, /* These are relocations for the PUSHJ instruction. */ BFD_RELOC_MMIX_PUSHJ, BFD_RELOC_MMIX_PUSHJ_1, BFD_RELOC_MMIX_PUSHJ_2, BFD_RELOC_MMIX_PUSHJ_3, BFD_RELOC_MMIX_PUSHJ_STUBBABLE, /* These are relocations for the JMP instruction. */ BFD_RELOC_MMIX_JMP, BFD_RELOC_MMIX_JMP_1, BFD_RELOC_MMIX_JMP_2, BFD_RELOC_MMIX_JMP_3, /* This is a relocation for a relative address as in a GETA instruction or a branch. */ BFD_RELOC_MMIX_ADDR19, /* This is a relocation for a relative address as in a JMP instruction. */ BFD_RELOC_MMIX_ADDR27, /* This is a relocation for an instruction field that may be a general register or a value 0..255. */ BFD_RELOC_MMIX_REG_OR_BYTE, /* This is a relocation for an instruction field that may be a general register. */ BFD_RELOC_MMIX_REG, /* This is a relocation for two instruction fields holding a register and an offset, the equivalent of the relocation. */ BFD_RELOC_MMIX_BASE_PLUS_OFFSET, /* This relocation is an assertion that the expression is not allocated as a global register. It does not modify contents. */ BFD_RELOC_MMIX_LOCAL, /* This is a 16 bit reloc for the AVR that stores 8 bit pc relative short offset into 7 bits. */ BFD_RELOC_AVR_7_PCREL, /* This is a 16 bit reloc for the AVR that stores 13 bit pc relative short offset into 12 bits. */ BFD_RELOC_AVR_13_PCREL, /* This is a 16 bit reloc for the AVR that stores 17 bit value (usually program memory address) into 16 bits. */ BFD_RELOC_AVR_16_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (usually data memory address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_LO8_LDI, /* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit of data memory address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HI8_LDI, /* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit of program memory address) into 8 bit immediate value of LDI insn. 
*/ BFD_RELOC_AVR_HH8_LDI, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (usually data memory address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_LO8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 8 bit of data memory address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HI8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (most high 8 bit of program memory address) into 8 bit immediate value of LDI or SUBI insn. */ BFD_RELOC_AVR_HH8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores 8 bit value (usually command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_LO8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit of command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HI8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit of command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HH8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (usually command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_LO8_LDI_PM_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 8 bit of 16 bit command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HI8_LDI_PM_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 6 bit of 22 bit command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HH8_LDI_PM_NEG, /* This is a 32 bit reloc for the AVR that stores 23 bit value into 22 bits. */ BFD_RELOC_AVR_CALL, /* This is a 16 bit reloc for the AVR that stores all needed bits for absolute addressing with ldi with overflow check to linktime */ BFD_RELOC_AVR_LDI, /* This is a 6 bit reloc for the AVR that stores offset for ldd/std instructions */ BFD_RELOC_AVR_6, /* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw instructions */ BFD_RELOC_AVR_6_ADIW, /* Direct 12 bit. */ BFD_RELOC_390_12, /* 12 bit GOT offset. */ BFD_RELOC_390_GOT12, /* 32 bit PC relative PLT address. */ BFD_RELOC_390_PLT32, /* Copy symbol at runtime. */ BFD_RELOC_390_COPY, /* Create GOT entry. */ BFD_RELOC_390_GLOB_DAT, /* Create PLT entry. */ BFD_RELOC_390_JMP_SLOT, /* Adjust by program base. */ BFD_RELOC_390_RELATIVE, /* 32 bit PC relative offset to GOT. */ BFD_RELOC_390_GOTPC, /* 16 bit GOT offset. */ BFD_RELOC_390_GOT16, /* PC relative 16 bit shifted by 1. */ BFD_RELOC_390_PC16DBL, /* 16 bit PC rel. PLT shifted by 1. */ BFD_RELOC_390_PLT16DBL, /* PC relative 32 bit shifted by 1. */ BFD_RELOC_390_PC32DBL, /* 32 bit PC rel. PLT shifted by 1. */ BFD_RELOC_390_PLT32DBL, /* 32 bit PC rel. GOT shifted by 1. */ BFD_RELOC_390_GOTPCDBL, /* 64 bit GOT offset. */ BFD_RELOC_390_GOT64, /* 64 bit PC relative PLT address. */ BFD_RELOC_390_PLT64, /* 32 bit rel. offset to GOT entry. */ BFD_RELOC_390_GOTENT, /* 64 bit offset to GOT. */ BFD_RELOC_390_GOTOFF64, /* 12-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT12, /* 16-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT16, /* 32-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT32, /* 64-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT64, /* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLTENT, /* 16-bit rel. offset from the GOT to a PLT entry. 
*/ BFD_RELOC_390_PLTOFF16, /* 32-bit rel. offset from the GOT to a PLT entry. */ BFD_RELOC_390_PLTOFF32, /* 64-bit rel. offset from the GOT to a PLT entry. */ BFD_RELOC_390_PLTOFF64, /* s390 tls relocations. */ BFD_RELOC_390_TLS_LOAD, BFD_RELOC_390_TLS_GDCALL, BFD_RELOC_390_TLS_LDCALL, BFD_RELOC_390_TLS_GD32, BFD_RELOC_390_TLS_GD64, BFD_RELOC_390_TLS_GOTIE12, BFD_RELOC_390_TLS_GOTIE32, BFD_RELOC_390_TLS_GOTIE64, BFD_RELOC_390_TLS_LDM32, BFD_RELOC_390_TLS_LDM64, BFD_RELOC_390_TLS_IE32, BFD_RELOC_390_TLS_IE64, BFD_RELOC_390_TLS_IEENT, BFD_RELOC_390_TLS_LE32, BFD_RELOC_390_TLS_LE64, BFD_RELOC_390_TLS_LDO32, BFD_RELOC_390_TLS_LDO64, BFD_RELOC_390_TLS_DTPMOD, BFD_RELOC_390_TLS_DTPOFF, BFD_RELOC_390_TLS_TPOFF, /* Long displacement extension. */ BFD_RELOC_390_20, BFD_RELOC_390_GOT20, BFD_RELOC_390_GOTPLT20, BFD_RELOC_390_TLS_GOTIE20, /* Scenix IP2K - 9-bit register number / data address */ BFD_RELOC_IP2K_FR9, /* Scenix IP2K - 4-bit register/data bank number */ BFD_RELOC_IP2K_BANK, /* Scenix IP2K - low 13 bits of instruction word address */ BFD_RELOC_IP2K_ADDR16CJP, /* Scenix IP2K - high 3 bits of instruction word address */ BFD_RELOC_IP2K_PAGE3, /* Scenix IP2K - ext/low/high 8 bits of data address */ BFD_RELOC_IP2K_LO8DATA, BFD_RELOC_IP2K_HI8DATA, BFD_RELOC_IP2K_EX8DATA, /* Scenix IP2K - low/high 8 bits of instruction word address */ BFD_RELOC_IP2K_LO8INSN, BFD_RELOC_IP2K_HI8INSN, /* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */ BFD_RELOC_IP2K_PC_SKIP, /* Scenix IP2K - 16 bit word address in text section. */ BFD_RELOC_IP2K_TEXT, /* Scenix IP2K - 7-bit sp or dp offset */ BFD_RELOC_IP2K_FR_OFFSET, /* Scenix VPE4K coprocessor - data/insn-space addressing */ BFD_RELOC_VPE4KMATH_DATA, BFD_RELOC_VPE4KMATH_INSN, /* These two relocations are used by the linker to determine which of the entries in a C++ virtual function table are actually used. When the --gc-sections option is given, the linker will zero out the entries that are not used, so that the code for those functions need not be included in the output. VTABLE_INHERIT is a zero-space relocation used to describe to the linker the inheritance tree of a C++ virtual function table. The relocation's symbol should be the parent class' vtable, and the relocation should be located at the child vtable. VTABLE_ENTRY is a zero-space relocation that describes the use of a virtual function table entry. The reloc's symbol should refer to the table of the class mentioned in the code. Off of that base, an offset describes the entry that is being used. For Rela hosts, this offset is stored in the reloc's addend. For Rel hosts, we are forced to put this offset in the reloc's section offset. */ BFD_RELOC_VTABLE_INHERIT, BFD_RELOC_VTABLE_ENTRY, /* Intel IA64 Relocations. 
*/ BFD_RELOC_IA64_IMM14, BFD_RELOC_IA64_IMM22, BFD_RELOC_IA64_IMM64, BFD_RELOC_IA64_DIR32MSB, BFD_RELOC_IA64_DIR32LSB, BFD_RELOC_IA64_DIR64MSB, BFD_RELOC_IA64_DIR64LSB, BFD_RELOC_IA64_GPREL22, BFD_RELOC_IA64_GPREL64I, BFD_RELOC_IA64_GPREL32MSB, BFD_RELOC_IA64_GPREL32LSB, BFD_RELOC_IA64_GPREL64MSB, BFD_RELOC_IA64_GPREL64LSB, BFD_RELOC_IA64_LTOFF22, BFD_RELOC_IA64_LTOFF64I, BFD_RELOC_IA64_PLTOFF22, BFD_RELOC_IA64_PLTOFF64I, BFD_RELOC_IA64_PLTOFF64MSB, BFD_RELOC_IA64_PLTOFF64LSB, BFD_RELOC_IA64_FPTR64I, BFD_RELOC_IA64_FPTR32MSB, BFD_RELOC_IA64_FPTR32LSB, BFD_RELOC_IA64_FPTR64MSB, BFD_RELOC_IA64_FPTR64LSB, BFD_RELOC_IA64_PCREL21B, BFD_RELOC_IA64_PCREL21BI, BFD_RELOC_IA64_PCREL21M, BFD_RELOC_IA64_PCREL21F, BFD_RELOC_IA64_PCREL22, BFD_RELOC_IA64_PCREL60B, BFD_RELOC_IA64_PCREL64I, BFD_RELOC_IA64_PCREL32MSB, BFD_RELOC_IA64_PCREL32LSB, BFD_RELOC_IA64_PCREL64MSB, BFD_RELOC_IA64_PCREL64LSB, BFD_RELOC_IA64_LTOFF_FPTR22, BFD_RELOC_IA64_LTOFF_FPTR64I, BFD_RELOC_IA64_LTOFF_FPTR32MSB, BFD_RELOC_IA64_LTOFF_FPTR32LSB, BFD_RELOC_IA64_LTOFF_FPTR64MSB, BFD_RELOC_IA64_LTOFF_FPTR64LSB, BFD_RELOC_IA64_SEGREL32MSB, BFD_RELOC_IA64_SEGREL32LSB, BFD_RELOC_IA64_SEGREL64MSB, BFD_RELOC_IA64_SEGREL64LSB, BFD_RELOC_IA64_SECREL32MSB, BFD_RELOC_IA64_SECREL32LSB, BFD_RELOC_IA64_SECREL64MSB, BFD_RELOC_IA64_SECREL64LSB, BFD_RELOC_IA64_REL32MSB, BFD_RELOC_IA64_REL32LSB, BFD_RELOC_IA64_REL64MSB, BFD_RELOC_IA64_REL64LSB, BFD_RELOC_IA64_LTV32MSB, BFD_RELOC_IA64_LTV32LSB, BFD_RELOC_IA64_LTV64MSB, BFD_RELOC_IA64_LTV64LSB, BFD_RELOC_IA64_IPLTMSB, BFD_RELOC_IA64_IPLTLSB, BFD_RELOC_IA64_COPY, BFD_RELOC_IA64_LTOFF22X, BFD_RELOC_IA64_LDXMOV, BFD_RELOC_IA64_TPREL14, BFD_RELOC_IA64_TPREL22, BFD_RELOC_IA64_TPREL64I, BFD_RELOC_IA64_TPREL64MSB, BFD_RELOC_IA64_TPREL64LSB, BFD_RELOC_IA64_LTOFF_TPREL22, BFD_RELOC_IA64_DTPMOD64MSB, BFD_RELOC_IA64_DTPMOD64LSB, BFD_RELOC_IA64_LTOFF_DTPMOD22, BFD_RELOC_IA64_DTPREL14, BFD_RELOC_IA64_DTPREL22, BFD_RELOC_IA64_DTPREL64I, BFD_RELOC_IA64_DTPREL32MSB, BFD_RELOC_IA64_DTPREL32LSB, BFD_RELOC_IA64_DTPREL64MSB, BFD_RELOC_IA64_DTPREL64LSB, BFD_RELOC_IA64_LTOFF_DTPREL22, /* Motorola 68HC11 reloc. This is the 8 bit high part of an absolute address. */ BFD_RELOC_M68HC11_HI8, /* Motorola 68HC11 reloc. This is the 8 bit low part of an absolute address. */ BFD_RELOC_M68HC11_LO8, /* Motorola 68HC11 reloc. This is the 3 bit of a value. */ BFD_RELOC_M68HC11_3B, /* Motorola 68HC11 reloc. This reloc marks the beginning of a jump/call instruction. It is used for linker relaxation to correctly identify beginning of instruction and change some branches to use PC-relative addressing mode. */ BFD_RELOC_M68HC11_RL_JUMP, /* Motorola 68HC11 reloc. This reloc marks a group of several instructions that gcc generates and for which the linker relaxation pass can modify and/or remove some of them. */ BFD_RELOC_M68HC11_RL_GROUP, /* Motorola 68HC11 reloc. This is the 16-bit lower part of an address. It is used for 'call' instruction to specify the symbol address without any special transformation (due to memory bank window). */ BFD_RELOC_M68HC11_LO16, /* Motorola 68HC11 reloc. This is a 8-bit reloc that specifies the page number of an address. It is used by 'call' instruction to specify the page number of the symbol. */ BFD_RELOC_M68HC11_PAGE, /* Motorola 68HC11 reloc. This is a 24-bit reloc that represents the address with a 16-bit value and a 8-bit page number. The symbol address is transformed to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */ BFD_RELOC_M68HC11_24, /* Motorola 68HC12 reloc. 
This is the 5 bits of a value. */ BFD_RELOC_M68HC12_5B, /* NS CR16C Relocations. */ BFD_RELOC_16C_NUM08, BFD_RELOC_16C_NUM08_C, BFD_RELOC_16C_NUM16, BFD_RELOC_16C_NUM16_C, BFD_RELOC_16C_NUM32, BFD_RELOC_16C_NUM32_C, BFD_RELOC_16C_DISP04, BFD_RELOC_16C_DISP04_C, BFD_RELOC_16C_DISP08, BFD_RELOC_16C_DISP08_C, BFD_RELOC_16C_DISP16, BFD_RELOC_16C_DISP16_C, BFD_RELOC_16C_DISP24, BFD_RELOC_16C_DISP24_C, BFD_RELOC_16C_DISP24a, BFD_RELOC_16C_DISP24a_C, BFD_RELOC_16C_REG04, BFD_RELOC_16C_REG04_C, BFD_RELOC_16C_REG04a, BFD_RELOC_16C_REG04a_C, BFD_RELOC_16C_REG14, BFD_RELOC_16C_REG14_C, BFD_RELOC_16C_REG16, BFD_RELOC_16C_REG16_C, BFD_RELOC_16C_REG20, BFD_RELOC_16C_REG20_C, BFD_RELOC_16C_ABS20, BFD_RELOC_16C_ABS20_C, BFD_RELOC_16C_ABS24, BFD_RELOC_16C_ABS24_C, BFD_RELOC_16C_IMM04, BFD_RELOC_16C_IMM04_C, BFD_RELOC_16C_IMM16, BFD_RELOC_16C_IMM16_C, BFD_RELOC_16C_IMM20, BFD_RELOC_16C_IMM20_C, BFD_RELOC_16C_IMM24, BFD_RELOC_16C_IMM24_C, BFD_RELOC_16C_IMM32, BFD_RELOC_16C_IMM32_C, /* NS CRX Relocations. */ BFD_RELOC_CRX_REL4, BFD_RELOC_CRX_REL8, BFD_RELOC_CRX_REL8_CMP, BFD_RELOC_CRX_REL16, BFD_RELOC_CRX_REL24, BFD_RELOC_CRX_REL32, BFD_RELOC_CRX_REGREL12, BFD_RELOC_CRX_REGREL22, BFD_RELOC_CRX_REGREL28, BFD_RELOC_CRX_REGREL32, BFD_RELOC_CRX_ABS16, BFD_RELOC_CRX_ABS32, BFD_RELOC_CRX_NUM8, BFD_RELOC_CRX_NUM16, BFD_RELOC_CRX_NUM32, BFD_RELOC_CRX_IMM16, BFD_RELOC_CRX_IMM32, BFD_RELOC_CRX_SWITCH8, BFD_RELOC_CRX_SWITCH16, BFD_RELOC_CRX_SWITCH32, /* These relocs are only used within the CRIS assembler. They are not (at present) written to any object files. */ BFD_RELOC_CRIS_BDISP8, BFD_RELOC_CRIS_UNSIGNED_5, BFD_RELOC_CRIS_SIGNED_6, BFD_RELOC_CRIS_UNSIGNED_6, BFD_RELOC_CRIS_SIGNED_8, BFD_RELOC_CRIS_UNSIGNED_8, BFD_RELOC_CRIS_SIGNED_16, BFD_RELOC_CRIS_UNSIGNED_16, BFD_RELOC_CRIS_LAPCQ_OFFSET, BFD_RELOC_CRIS_UNSIGNED_4, /* Relocs used in ELF shared libraries for CRIS. */ BFD_RELOC_CRIS_COPY, BFD_RELOC_CRIS_GLOB_DAT, BFD_RELOC_CRIS_JUMP_SLOT, BFD_RELOC_CRIS_RELATIVE, /* 32-bit offset to symbol-entry within GOT. */ BFD_RELOC_CRIS_32_GOT, /* 16-bit offset to symbol-entry within GOT. */ BFD_RELOC_CRIS_16_GOT, /* 32-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_CRIS_32_GOTPLT, /* 16-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_CRIS_16_GOTPLT, /* 32-bit offset to symbol, relative to GOT. */ BFD_RELOC_CRIS_32_GOTREL, /* 32-bit offset to symbol with PLT entry, relative to GOT. */ BFD_RELOC_CRIS_32_PLT_GOTREL, /* 32-bit offset to symbol with PLT entry, relative to this relocation. */ BFD_RELOC_CRIS_32_PLT_PCREL, /* Intel i860 Relocations. */ BFD_RELOC_860_COPY, BFD_RELOC_860_GLOB_DAT, BFD_RELOC_860_JUMP_SLOT, BFD_RELOC_860_RELATIVE, BFD_RELOC_860_PC26, BFD_RELOC_860_PLT26, BFD_RELOC_860_PC16, BFD_RELOC_860_LOW0, BFD_RELOC_860_SPLIT0, BFD_RELOC_860_LOW1, BFD_RELOC_860_SPLIT1, BFD_RELOC_860_LOW2, BFD_RELOC_860_SPLIT2, BFD_RELOC_860_LOW3, BFD_RELOC_860_LOGOT0, BFD_RELOC_860_SPGOT0, BFD_RELOC_860_LOGOT1, BFD_RELOC_860_SPGOT1, BFD_RELOC_860_LOGOTOFF0, BFD_RELOC_860_SPGOTOFF0, BFD_RELOC_860_LOGOTOFF1, BFD_RELOC_860_SPGOTOFF1, BFD_RELOC_860_LOGOTOFF2, BFD_RELOC_860_LOGOTOFF3, BFD_RELOC_860_LOPC, BFD_RELOC_860_HIGHADJ, BFD_RELOC_860_HAGOT, BFD_RELOC_860_HAGOTOFF, BFD_RELOC_860_HAPC, BFD_RELOC_860_HIGH, BFD_RELOC_860_HIGOT, BFD_RELOC_860_HIGOTOFF, /* OpenRISC Relocations. */ BFD_RELOC_OPENRISC_ABS_26, BFD_RELOC_OPENRISC_REL_26, /* H8 elf Relocations. 
*/ BFD_RELOC_H8_DIR16A8, BFD_RELOC_H8_DIR16R8, BFD_RELOC_H8_DIR24A8, BFD_RELOC_H8_DIR24R8, BFD_RELOC_H8_DIR32A16, /* Sony Xstormy16 Relocations. */ BFD_RELOC_XSTORMY16_REL_12, BFD_RELOC_XSTORMY16_12, BFD_RELOC_XSTORMY16_24, BFD_RELOC_XSTORMY16_FPTR16, /* Relocations used by VAX ELF. */ BFD_RELOC_VAX_GLOB_DAT, BFD_RELOC_VAX_JMP_SLOT, BFD_RELOC_VAX_RELATIVE, /* Morpho MS1 - 16 bit immediate relocation. */ BFD_RELOC_MS1_PC16, /* Morpho MS1 - Hi 16 bits of an address. */ BFD_RELOC_MS1_HI16, /* Morpho MS1 - Low 16 bits of an address. */ BFD_RELOC_MS1_LO16, /* Morpho MS1 - Used to tell the linker which vtable entries are used. */ BFD_RELOC_MS1_GNU_VTINHERIT, /* Morpho MS1 - Used to tell the linker which vtable entries are used. */ BFD_RELOC_MS1_GNU_VTENTRY, /* msp430 specific relocation codes */ BFD_RELOC_MSP430_10_PCREL, BFD_RELOC_MSP430_16_PCREL, BFD_RELOC_MSP430_16, BFD_RELOC_MSP430_16_PCREL_BYTE, BFD_RELOC_MSP430_16_BYTE, BFD_RELOC_MSP430_2X_PCREL, BFD_RELOC_MSP430_RL_PCREL, /* IQ2000 Relocations. */ BFD_RELOC_IQ2000_OFFSET_16, BFD_RELOC_IQ2000_OFFSET_21, BFD_RELOC_IQ2000_UHI16, /* Special Xtensa relocation used only by PLT entries in ELF shared objects to indicate that the runtime linker should set the value to one of its own internal functions or data structures. */ BFD_RELOC_XTENSA_RTLD, /* Xtensa relocations for ELF shared objects. */ BFD_RELOC_XTENSA_GLOB_DAT, BFD_RELOC_XTENSA_JMP_SLOT, BFD_RELOC_XTENSA_RELATIVE, /* Xtensa relocation used in ELF object files for symbols that may require PLT entries. Otherwise, this is just a generic 32-bit relocation. */ BFD_RELOC_XTENSA_PLT, /* Xtensa relocations to mark the difference of two local symbols. These are only needed to support linker relaxation and can be ignored when not relaxing. The field is set to the value of the difference assuming no relaxation. The relocation encodes the position of the first symbol so the linker can determine whether to adjust the field value. */ BFD_RELOC_XTENSA_DIFF8, BFD_RELOC_XTENSA_DIFF16, BFD_RELOC_XTENSA_DIFF32, /* Generic Xtensa relocations for instruction operands. Only the slot number is encoded in the relocation. The relocation applies to the last PC-relative immediate operand, or if there are no PC-relative immediates, to the last immediate operand. */ BFD_RELOC_XTENSA_SLOT0_OP, BFD_RELOC_XTENSA_SLOT1_OP, BFD_RELOC_XTENSA_SLOT2_OP, BFD_RELOC_XTENSA_SLOT3_OP, BFD_RELOC_XTENSA_SLOT4_OP, BFD_RELOC_XTENSA_SLOT5_OP, BFD_RELOC_XTENSA_SLOT6_OP, BFD_RELOC_XTENSA_SLOT7_OP, BFD_RELOC_XTENSA_SLOT8_OP, BFD_RELOC_XTENSA_SLOT9_OP, BFD_RELOC_XTENSA_SLOT10_OP, BFD_RELOC_XTENSA_SLOT11_OP, BFD_RELOC_XTENSA_SLOT12_OP, BFD_RELOC_XTENSA_SLOT13_OP, BFD_RELOC_XTENSA_SLOT14_OP, /* Alternate Xtensa relocations. Only the slot is encoded in the relocation. The meaning of these relocations is opcode-specific. */ BFD_RELOC_XTENSA_SLOT0_ALT, BFD_RELOC_XTENSA_SLOT1_ALT, BFD_RELOC_XTENSA_SLOT2_ALT, BFD_RELOC_XTENSA_SLOT3_ALT, BFD_RELOC_XTENSA_SLOT4_ALT, BFD_RELOC_XTENSA_SLOT5_ALT, BFD_RELOC_XTENSA_SLOT6_ALT, BFD_RELOC_XTENSA_SLOT7_ALT, BFD_RELOC_XTENSA_SLOT8_ALT, BFD_RELOC_XTENSA_SLOT9_ALT, BFD_RELOC_XTENSA_SLOT10_ALT, BFD_RELOC_XTENSA_SLOT11_ALT, BFD_RELOC_XTENSA_SLOT12_ALT, BFD_RELOC_XTENSA_SLOT13_ALT, BFD_RELOC_XTENSA_SLOT14_ALT, /* Xtensa relocations for backward compatibility. These have all been replaced by BFD_RELOC_XTENSA_SLOT0_OP. */ BFD_RELOC_XTENSA_OP0, BFD_RELOC_XTENSA_OP1, BFD_RELOC_XTENSA_OP2, /* Xtensa relocation to mark that the assembler expanded the instructions from an original target. 
The expansion size is encoded in the reloc size. */ BFD_RELOC_XTENSA_ASM_EXPAND, /* Xtensa relocation to mark that the linker should simplify assembler-expanded instructions. This is commonly used internally by the linker after analysis of a BFD_RELOC_XTENSA_ASM_EXPAND. */ BFD_RELOC_XTENSA_ASM_SIMPLIFY, BFD_RELOC_UNUSED }; typedef enum bfd_reloc_code_real bfd_reloc_code_real_type; reloc_howto_type *bfd_reloc_type_lookup (bfd *abfd, bfd_reloc_code_real_type code); const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code); /* Extracted from syms.c. */ typedef struct bfd_symbol { /* A pointer to the BFD which owns the symbol. This information is necessary so that a back end can work out what additional information (invisible to the application writer) is carried with the symbol. This field is *almost* redundant, since you can use section->owner instead, except that some symbols point to the global sections bfd_{abs,com,und}_section. This could be fixed by making these globals be per-bfd (or per-target-flavor). FIXME. */ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */ /* The text of the symbol. The name is left alone, and not copied; the application may not alter it. */ const char *name; /* The value of the symbol. This really should be a union of a numeric value with a pointer, since some flags indicate that a pointer to another symbol is stored here. */ symvalue value; /* Attributes of a symbol. */ #define BSF_NO_FLAGS 0x00 /* The symbol has local scope; <> in <>. The value is the offset into the section of the data. */ #define BSF_LOCAL 0x01 /* The symbol has global scope; initialized data in <>. The value is the offset into the section of the data. */ #define BSF_GLOBAL 0x02 /* The symbol has global scope and is exported. The value is the offset into the section of the data. */ #define BSF_EXPORT BSF_GLOBAL /* No real difference. */ /* A normal C symbol would be one of: <>, <>, <> or <>. */ /* The symbol is a debugging record. The value has an arbitrary meaning, unless BSF_DEBUGGING_RELOC is also set. */ #define BSF_DEBUGGING 0x08 /* The symbol denotes a function entry point. Used in ELF, perhaps others someday. */ #define BSF_FUNCTION 0x10 /* Used by the linker. */ #define BSF_KEEP 0x20 #define BSF_KEEP_G 0x40 /* A weak global symbol, overridable without warnings by a regular global symbol of the same name. */ #define BSF_WEAK 0x80 /* This symbol was created to point to a section, e.g. ELF's STT_SECTION symbols. */ #define BSF_SECTION_SYM 0x100 /* The symbol used to be a common symbol, but now it is allocated. */ #define BSF_OLD_COMMON 0x200 /* The default value for common data. */ #define BFD_FORT_COMM_DEFAULT_VALUE 0 /* In some files the type of a symbol sometimes alters its location in an output file - ie in coff a <> symbol which is also <> symbol appears where it was declared and not at the end of a section. This bit is set by the target BFD part to convey this information. */ #define BSF_NOT_AT_END 0x400 /* Signal that the symbol is the label of constructor section. */ #define BSF_CONSTRUCTOR 0x800 /* Signal that the symbol is a warning symbol. The name is a warning. The name of the next symbol is the one to warn about; if a reference is made to a symbol with the same name as the next symbol, a warning is issued by the linker. */ #define BSF_WARNING 0x1000 /* Signal that the symbol is indirect. This symbol is an indirect pointer to the symbol with the same name as the next symbol. 
*/ #define BSF_INDIRECT 0x2000 /* BSF_FILE marks symbols that contain a file name. This is used for ELF STT_FILE symbols. */ #define BSF_FILE 0x4000 /* Symbol is from dynamic linking information. */ #define BSF_DYNAMIC 0x8000 /* The symbol denotes a data object. Used in ELF, and perhaps others someday. */ #define BSF_OBJECT 0x10000 /* This symbol is a debugging symbol. The value is the offset into the section of the data. BSF_DEBUGGING should be set as well. */ #define BSF_DEBUGGING_RELOC 0x20000 /* This symbol is thread local. Used in ELF. */ #define BSF_THREAD_LOCAL 0x40000 flagword flags; /* A pointer to the section to which this symbol is relative. This will always be non NULL, there are special sections for undefined and absolute symbols. */ struct bfd_section *section; /* Back end special data. */ union { void *p; bfd_vma i; } udata; } asymbol; #define bfd_get_symtab_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd)) bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym); bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name); #define bfd_is_local_label_name(abfd, name) \ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name)) bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym); #define bfd_is_target_special_symbol(abfd, sym) \ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym)) #define bfd_canonicalize_symtab(abfd, location) \ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location)) bfd_boolean bfd_set_symtab (bfd *abfd, asymbol **location, unsigned int count); void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol); #define bfd_make_empty_symbol(abfd) \ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd)) asymbol *_bfd_generic_make_empty_symbol (bfd *); #define bfd_make_debug_symbol(abfd,ptr,size) \ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size)) int bfd_decode_symclass (asymbol *symbol); bfd_boolean bfd_is_undefined_symclass (int symclass); void bfd_symbol_info (asymbol *symbol, symbol_info *ret); bfd_boolean bfd_copy_private_symbol_data (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym); #define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \ (ibfd, isymbol, obfd, osymbol)) /* Extracted from bfd.c. */ struct bfd { /* A unique identifier of the BFD */ unsigned int id; /* The filename the application opened the BFD with. */ const char *filename; /* A pointer to the target jump table. */ const struct bfd_target *xvec; /* The IOSTREAM, and corresponding IO vector that provide access to the file backing the BFD. */ void *iostream; const struct bfd_iovec *iovec; /* Is the file descriptor being cached? That is, can it be closed as needed, and re-opened when accessed later? */ bfd_boolean cacheable; /* Marks whether there was a default target specified when the BFD was opened. This is used to select which matching algorithm to use to choose the back end. */ bfd_boolean target_defaulted; /* The caching routines use these to maintain a least-recently-used list of BFDs. */ struct bfd *lru_prev, *lru_next; /* When a file is closed by the caching routines, BFD retains state information on the file here... */ ufile_ptr where; /* ... and here: (``once'' means at least once). */ bfd_boolean opened_once; /* Set if we have a locally maintained mtime value, rather than getting it from the file each time. */ bfd_boolean mtime_set; /* File modified time, if mtime_set is TRUE. */ long mtime; /* Reserved for an unimplemented file locking extension. 
*/ int ifd; /* The format which belongs to the BFD. (object, core, etc.) */ bfd_format format; /* The direction with which the BFD was opened. */ enum bfd_direction { no_direction = 0, read_direction = 1, write_direction = 2, both_direction = 3 } direction; /* Format_specific flags. */ flagword flags; /* Currently my_archive is tested before adding origin to anything. I believe that this can become always an add of origin, with origin set to 0 for non archive files. */ ufile_ptr origin; /* Remember when output has begun, to stop strange things from happening. */ bfd_boolean output_has_begun; /* A hash table for section names. */ struct bfd_hash_table section_htab; /* Pointer to linked list of sections. */ struct bfd_section *sections; /* The last section on the section list. */ struct bfd_section *section_last; /* The number of sections. */ unsigned int section_count; /* Stuff only useful for object files: The start address. */ bfd_vma start_address; /* Used for input and output. */ unsigned int symcount; /* Symbol table for output BFD (with symcount entries). */ struct bfd_symbol **outsymbols; /* Used for slurped dynamic symbol tables. */ unsigned int dynsymcount; /* Pointer to structure which contains architecture information. */ const struct bfd_arch_info *arch_info; /* Flag set if symbols from this BFD should not be exported. */ bfd_boolean no_export; /* Stuff only useful for archives. */ void *arelt_data; struct bfd *my_archive; /* The containing archive BFD. */ struct bfd *next; /* The next BFD in the archive. */ struct bfd *archive_head; /* The first BFD in the archive. */ bfd_boolean has_armap; /* A chain of BFD structures involved in a link. */ struct bfd *link_next; /* A field used by _bfd_generic_link_add_archive_symbols. This will be used only for archive elements. */ int archive_pass; /* Used by the back end to hold private data. */ union { struct aout_data_struct *aout_data; struct artdata *aout_ar_data; struct _oasys_data *oasys_obj_data; struct _oasys_ar_data *oasys_ar_data; struct coff_tdata *coff_obj_data; struct pe_tdata *pe_obj_data; struct xcoff_tdata *xcoff_obj_data; struct ecoff_tdata *ecoff_obj_data; struct ieee_data_struct *ieee_data; struct ieee_ar_data_struct *ieee_ar_data; struct srec_data_struct *srec_data; struct ihex_data_struct *ihex_data; struct tekhex_data_struct *tekhex_data; struct elf_obj_tdata *elf_obj_data; struct nlm_obj_tdata *nlm_obj_data; struct bout_data_struct *bout_data; struct mmo_data_struct *mmo_data; struct sun_core_struct *sun_core_data; struct sco5_core_struct *sco5_core_data; struct trad_core_struct *trad_core_data; struct som_data_struct *som_data; struct hpux_core_struct *hpux_core_data; struct hppabsd_core_struct *hppabsd_core_data; struct sgi_core_struct *sgi_core_data; struct lynx_core_struct *lynx_core_data; struct osf_core_struct *osf_core_data; struct cisco_core_struct *cisco_core_data; struct versados_data_struct *versados_data; struct netbsd_core_struct *netbsd_core_data; struct mach_o_data_struct *mach_o_data; struct mach_o_fat_data_struct *mach_o_fat_data; struct bfd_pef_data_struct *pef_data; struct bfd_pef_xlib_data_struct *pef_xlib_data; struct bfd_sym_data_struct *sym_data; void *any; } tdata; /* Used by the application to hold private data. */ void *usrdata; /* Where all the allocated stuff under this BFD goes. This is a struct objalloc *, but we use void * to avoid requiring the inclusion of objalloc.h. 
*/ void *memory; }; typedef enum bfd_error { bfd_error_no_error = 0, bfd_error_system_call, bfd_error_invalid_target, bfd_error_wrong_format, bfd_error_wrong_object_format, bfd_error_invalid_operation, bfd_error_no_memory, bfd_error_no_symbols, bfd_error_no_armap, bfd_error_no_more_archived_files, bfd_error_malformed_archive, bfd_error_file_not_recognized, bfd_error_file_ambiguously_recognized, bfd_error_no_contents, bfd_error_nonrepresentable_section, bfd_error_no_debug_section, bfd_error_bad_value, bfd_error_file_truncated, bfd_error_file_too_big, bfd_error_invalid_error_code } bfd_error_type; bfd_error_type bfd_get_error (void); void bfd_set_error (bfd_error_type error_tag); const char *bfd_errmsg (bfd_error_type error_tag); void bfd_perror (const char *message); typedef void (*bfd_error_handler_type) (const char *, ...); bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type); void bfd_set_error_program_name (const char *); bfd_error_handler_type bfd_get_error_handler (void); long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect); long bfd_canonicalize_reloc (bfd *abfd, asection *sec, arelent **loc, asymbol **syms); void bfd_set_reloc (bfd *abfd, asection *sec, arelent **rel, unsigned int count); bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags); int bfd_get_arch_size (bfd *abfd); int bfd_get_sign_extend_vma (bfd *abfd); bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma); unsigned int bfd_get_gp_size (bfd *abfd); void bfd_set_gp_size (bfd *abfd, unsigned int i); bfd_vma bfd_scan_vma (const char *string, const char **end, int base); bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd); #define bfd_copy_private_header_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_copy_private_header_data, \ (ibfd, obfd)) bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd); #define bfd_copy_private_bfd_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \ (ibfd, obfd)) bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd); #define bfd_merge_private_bfd_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \ (ibfd, obfd)) bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags); #define bfd_set_private_flags(abfd, flags) \ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags)) #define bfd_sizeof_headers(abfd, reloc) \ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc)) #define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \ BFD_SEND (abfd, _bfd_find_nearest_line, \ (abfd, sec, syms, off, file, func, line)) #define bfd_find_line(abfd, syms, sym, file, line) \ BFD_SEND (abfd, _bfd_find_line, \ (abfd, syms, sym, file, line)) #define bfd_find_inliner_info(abfd, file, func, line) \ BFD_SEND (abfd, _bfd_find_inliner_info, \ (abfd, file, func, line)) #define bfd_debug_info_start(abfd) \ BFD_SEND (abfd, _bfd_debug_info_start, (abfd)) #define bfd_debug_info_end(abfd) \ BFD_SEND (abfd, _bfd_debug_info_end, (abfd)) #define bfd_debug_info_accumulate(abfd, section) \ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section)) #define bfd_stat_arch_elt(abfd, stat) \ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat)) #define bfd_update_armap_timestamp(abfd) \ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd)) #define bfd_set_arch_mach(abfd, arch, mach)\ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach)) #define bfd_relax_section(abfd, section, link_info, again) \ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again)) #define bfd_gc_sections(abfd, link_info) \ BFD_SEND (abfd, _bfd_gc_sections, (abfd, 
link_info)) #define bfd_merge_sections(abfd, link_info) \ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info)) #define bfd_is_group_section(abfd, sec) \ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec)) #define bfd_discard_group(abfd, sec) \ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec)) #define bfd_link_hash_table_create(abfd) \ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd)) #define bfd_link_hash_table_free(abfd, hash) \ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash)) #define bfd_link_add_symbols(abfd, info) \ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info)) #define bfd_link_just_syms(abfd, sec, info) \ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info)) #define bfd_final_link(abfd, info) \ BFD_SEND (abfd, _bfd_final_link, (abfd, info)) #define bfd_free_cached_info(abfd) \ BFD_SEND (abfd, _bfd_free_cached_info, (abfd)) #define bfd_get_dynamic_symtab_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd)) #define bfd_print_private_bfd_data(abfd, file)\ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file)) #define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols)) #define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \ dyncount, dynsyms, ret)) #define bfd_get_dynamic_reloc_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd)) #define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms)) extern bfd_byte *bfd_get_relocated_section_contents (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, bfd_boolean, asymbol **); bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative); struct bfd_preserve { void *marker; void *tdata; flagword flags; const struct bfd_arch_info *arch_info; struct bfd_section *sections; struct bfd_section *section_last; unsigned int section_count; struct bfd_hash_table section_htab; }; bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *); void bfd_preserve_restore (bfd *, struct bfd_preserve *); void bfd_preserve_finish (bfd *, struct bfd_preserve *); /* Extracted from archive.c. */ symindex bfd_get_next_mapent (bfd *abfd, symindex previous, carsym **sym); bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head); bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous); /* Extracted from corefile.c. */ const char *bfd_core_file_failing_command (bfd *abfd); int bfd_core_file_failing_signal (bfd *abfd); bfd_boolean core_file_matches_executable_p (bfd *core_bfd, bfd *exec_bfd); /* Extracted from targets.c. */ #define BFD_SEND(bfd, message, arglist) \ ((*((bfd)->xvec->message)) arglist) #ifdef DEBUG_BFD_SEND #undef BFD_SEND #define BFD_SEND(bfd, message, arglist) \ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \ ((*((bfd)->xvec->message)) arglist) : \ (bfd_assert (__FILE__,__LINE__), NULL)) #endif #define BFD_SEND_FMT(bfd, message, arglist) \ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) #ifdef DEBUG_BFD_SEND #undef BFD_SEND_FMT #define BFD_SEND_FMT(bfd, message, arglist) \ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? 
\ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \ (bfd_assert (__FILE__,__LINE__), NULL)) #endif enum bfd_flavour { bfd_target_unknown_flavour, bfd_target_aout_flavour, bfd_target_coff_flavour, bfd_target_ecoff_flavour, bfd_target_xcoff_flavour, bfd_target_elf_flavour, bfd_target_ieee_flavour, bfd_target_nlm_flavour, bfd_target_oasys_flavour, bfd_target_tekhex_flavour, bfd_target_srec_flavour, bfd_target_ihex_flavour, bfd_target_som_flavour, bfd_target_os9k_flavour, bfd_target_versados_flavour, bfd_target_msdos_flavour, bfd_target_ovax_flavour, bfd_target_evax_flavour, bfd_target_mmo_flavour, bfd_target_mach_o_flavour, bfd_target_pef_flavour, bfd_target_pef_xlib_flavour, bfd_target_sym_flavour }; enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; /* Forward declaration. */ typedef struct bfd_link_info _bfd_link_info; typedef struct bfd_target { /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */ char *name; /* The "flavour" of a back end is a general indication about the contents of a file. */ enum bfd_flavour flavour; /* The order of bytes within the data area of a file. */ enum bfd_endian byteorder; /* The order of bytes within the header parts of a file. */ enum bfd_endian header_byteorder; /* A mask of all the flags which an executable may have set - from the set <>, <>, ...<>. */ flagword object_flags; /* A mask of all the flags which a section may have set - from the set <>, <>, ...<>. */ flagword section_flags; /* The character normally found at the front of a symbol. (if any), perhaps `_'. */ char symbol_leading_char; /* The pad character for file names within an archive header. */ char ar_pad_char; /* The maximum number of characters in an archive header. */ unsigned short ar_max_namelen; /* Entries for byte swapping for data. These are different from the other entry points, since they don't take a BFD as the first argument. Certain other handlers could do the same. */ bfd_uint64_t (*bfd_getx64) (const void *); bfd_int64_t (*bfd_getx_signed_64) (const void *); void (*bfd_putx64) (bfd_uint64_t, void *); bfd_vma (*bfd_getx32) (const void *); bfd_signed_vma (*bfd_getx_signed_32) (const void *); void (*bfd_putx32) (bfd_vma, void *); bfd_vma (*bfd_getx16) (const void *); bfd_signed_vma (*bfd_getx_signed_16) (const void *); void (*bfd_putx16) (bfd_vma, void *); /* Byte swapping for the headers. */ bfd_uint64_t (*bfd_h_getx64) (const void *); bfd_int64_t (*bfd_h_getx_signed_64) (const void *); void (*bfd_h_putx64) (bfd_uint64_t, void *); bfd_vma (*bfd_h_getx32) (const void *); bfd_signed_vma (*bfd_h_getx_signed_32) (const void *); void (*bfd_h_putx32) (bfd_vma, void *); bfd_vma (*bfd_h_getx16) (const void *); bfd_signed_vma (*bfd_h_getx_signed_16) (const void *); void (*bfd_h_putx16) (bfd_vma, void *); /* Format dependent routines: these are vectors of entry points within the target vector structure, one for each format to check. */ /* Check the format of a file being read. Return a <> or zero. */ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *); /* Set the format of a file being written. */ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *); /* Write cached information into a file being written, at <>. */ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *); /* Generic entry points. 
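(Editor's note, not part of the original header: a backend typically fills these slots by expanding the BFD_JUMP_TABLE_* macros below inside its bfd_target initializer, e.g. BFD_JUMP_TABLE_GENERIC (myfmt), which token-pastes the prefix onto each entry name so that functions such as myfmt_get_section_contents must exist in that backend; the "myfmt" prefix here is purely a hypothetical example.)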
*/ #define BFD_JUMP_TABLE_GENERIC(NAME) \ NAME##_close_and_cleanup, \ NAME##_bfd_free_cached_info, \ NAME##_new_section_hook, \ NAME##_get_section_contents, \ NAME##_get_section_contents_in_window /* Called when the BFD is being closed to do any necessary cleanup. */ bfd_boolean (*_close_and_cleanup) (bfd *); /* Ask the BFD to free all cached information. */ bfd_boolean (*_bfd_free_cached_info) (bfd *); /* Called when a new section is created. */ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr); /* Read the contents of a section. */ bfd_boolean (*_bfd_get_section_contents) (bfd *, sec_ptr, void *, file_ptr, bfd_size_type); bfd_boolean (*_bfd_get_section_contents_in_window) (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type); /* Entry points to copy private data. */ #define BFD_JUMP_TABLE_COPY(NAME) \ NAME##_bfd_copy_private_bfd_data, \ NAME##_bfd_merge_private_bfd_data, \ NAME##_bfd_copy_private_section_data, \ NAME##_bfd_copy_private_symbol_data, \ NAME##_bfd_copy_private_header_data, \ NAME##_bfd_set_private_flags, \ NAME##_bfd_print_private_bfd_data /* Called to copy BFD general private data from one object file to another. */ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *); /* Called to merge BFD general private data from one object file to a common output file when linking. */ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *); /* Called to copy BFD private section data from one object file to another. */ bfd_boolean (*_bfd_copy_private_section_data) (bfd *, sec_ptr, bfd *, sec_ptr); /* Called to copy BFD private symbol data from one symbol to another. */ bfd_boolean (*_bfd_copy_private_symbol_data) (bfd *, asymbol *, bfd *, asymbol *); /* Called to copy BFD private header data from one object file to another. */ bfd_boolean (*_bfd_copy_private_header_data) (bfd *, bfd *); /* Called to set private backend flags. */ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword); /* Called to print private BFD data. */ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *); /* Core file entry points. */ #define BFD_JUMP_TABLE_CORE(NAME) \ NAME##_core_file_failing_command, \ NAME##_core_file_failing_signal, \ NAME##_core_file_matches_executable_p char * (*_core_file_failing_command) (bfd *); int (*_core_file_failing_signal) (bfd *); bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *); /* Archive entry points. */ #define BFD_JUMP_TABLE_ARCHIVE(NAME) \ NAME##_slurp_armap, \ NAME##_slurp_extended_name_table, \ NAME##_construct_extended_name_table, \ NAME##_truncate_arname, \ NAME##_write_armap, \ NAME##_read_ar_hdr, \ NAME##_openr_next_archived_file, \ NAME##_get_elt_at_index, \ NAME##_generic_stat_arch_elt, \ NAME##_update_armap_timestamp bfd_boolean (*_bfd_slurp_armap) (bfd *); bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *); bfd_boolean (*_bfd_construct_extended_name_table) (bfd *, char **, bfd_size_type *, const char **); void (*_bfd_truncate_arname) (bfd *, const char *, char *); bfd_boolean (*write_armap) (bfd *, unsigned int, struct orl *, unsigned int, int); void * (*_bfd_read_ar_hdr_fn) (bfd *); bfd * (*openr_next_archived_file) (bfd *, bfd *); #define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i)) bfd * (*_bfd_get_elt_at_index) (bfd *, symindex); int (*_bfd_stat_arch_elt) (bfd *, struct stat *); bfd_boolean (*_bfd_update_armap_timestamp) (bfd *); /* Entry points used for symbols. 
*/ #define BFD_JUMP_TABLE_SYMBOLS(NAME) \ NAME##_get_symtab_upper_bound, \ NAME##_canonicalize_symtab, \ NAME##_make_empty_symbol, \ NAME##_print_symbol, \ NAME##_get_symbol_info, \ NAME##_bfd_is_local_label_name, \ NAME##_bfd_is_target_special_symbol, \ NAME##_get_lineno, \ NAME##_find_nearest_line, \ _bfd_generic_find_line, \ NAME##_find_inliner_info, \ NAME##_bfd_make_debug_symbol, \ NAME##_read_minisymbols, \ NAME##_minisymbol_to_symbol long (*_bfd_get_symtab_upper_bound) (bfd *); long (*_bfd_canonicalize_symtab) (bfd *, struct bfd_symbol **); struct bfd_symbol * (*_bfd_make_empty_symbol) (bfd *); void (*_bfd_print_symbol) (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type); #define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e)) void (*_bfd_get_symbol_info) (bfd *, struct bfd_symbol *, symbol_info *); #define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e)) bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *); bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *); alent * (*_get_lineno) (bfd *, struct bfd_symbol *); bfd_boolean (*_bfd_find_nearest_line) (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma, const char **, const char **, unsigned int *); bfd_boolean (*_bfd_find_line) (bfd *, struct bfd_symbol **, struct bfd_symbol *, const char **, unsigned int *); bfd_boolean (*_bfd_find_inliner_info) (bfd *, const char **, const char **, unsigned int *); /* Back-door to allow format-aware applications to create debug symbols while using BFD for everything else. Currently used by the assembler when creating COFF files. */ asymbol * (*_bfd_make_debug_symbol) (bfd *, void *, unsigned long size); #define bfd_read_minisymbols(b, d, m, s) \ BFD_SEND (b, _read_minisymbols, (b, d, m, s)) long (*_read_minisymbols) (bfd *, bfd_boolean, void **, unsigned int *); #define bfd_minisymbol_to_symbol(b, d, m, f) \ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f)) asymbol * (*_minisymbol_to_symbol) (bfd *, bfd_boolean, const void *, asymbol *); /* Routines for relocs. */ #define BFD_JUMP_TABLE_RELOCS(NAME) \ NAME##_get_reloc_upper_bound, \ NAME##_canonicalize_reloc, \ NAME##_bfd_reloc_type_lookup long (*_get_reloc_upper_bound) (bfd *, sec_ptr); long (*_bfd_canonicalize_reloc) (bfd *, sec_ptr, arelent **, struct bfd_symbol **); /* See documentation on reloc types. */ reloc_howto_type * (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type); /* Routines used when writing an object file. */ #define BFD_JUMP_TABLE_WRITE(NAME) \ NAME##_set_arch_mach, \ NAME##_set_section_contents bfd_boolean (*_bfd_set_arch_mach) (bfd *, enum bfd_architecture, unsigned long); bfd_boolean (*_bfd_set_section_contents) (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type); /* Routines used by the linker. 
*/ #define BFD_JUMP_TABLE_LINK(NAME) \ NAME##_sizeof_headers, \ NAME##_bfd_get_relocated_section_contents, \ NAME##_bfd_relax_section, \ NAME##_bfd_link_hash_table_create, \ NAME##_bfd_link_hash_table_free, \ NAME##_bfd_link_add_symbols, \ NAME##_bfd_link_just_syms, \ NAME##_bfd_final_link, \ NAME##_bfd_link_split_section, \ NAME##_bfd_gc_sections, \ NAME##_bfd_merge_sections, \ NAME##_bfd_is_group_section, \ NAME##_bfd_discard_group, \ NAME##_section_already_linked \ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean); bfd_byte * (*_bfd_get_relocated_section_contents) (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, bfd_boolean, struct bfd_symbol **); bfd_boolean (*_bfd_relax_section) (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *); /* Create a hash table for the linker. Different backends store different information in this table. */ struct bfd_link_hash_table * (*_bfd_link_hash_table_create) (bfd *); /* Release the memory associated with the linker hash table. */ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *); /* Add symbols from this object file into the hash table. */ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *); /* Indicate that we are only retrieving symbol values from this section. */ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *); /* Do a link based on the link_order structures attached to each section of the BFD. */ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *); /* Should this section be split up into smaller pieces during linking. */ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *); /* Remove sections that are not referenced from the output. */ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *); /* Attempt to merge SEC_MERGE sections. */ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *); /* Is this section a member of a group? */ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *); /* Discard members of a group. */ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *); /* Check if SEC has already been linked during a relocatable or final link. */ void (*_section_already_linked) (bfd *, struct bfd_section *); /* Routines to handle dynamic symbols and relocs. */ #define BFD_JUMP_TABLE_DYNAMIC(NAME) \ NAME##_get_dynamic_symtab_upper_bound, \ NAME##_canonicalize_dynamic_symtab, \ NAME##_get_synthetic_symtab, \ NAME##_get_dynamic_reloc_upper_bound, \ NAME##_canonicalize_dynamic_reloc /* Get the amount of memory required to hold the dynamic symbols. */ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *); /* Read in the dynamic symbols. */ long (*_bfd_canonicalize_dynamic_symtab) (bfd *, struct bfd_symbol **); /* Create synthesized symbols. */ long (*_bfd_get_synthetic_symtab) (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **, struct bfd_symbol **); /* Get the amount of memory required to hold the dynamic relocs. */ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *); /* Read in the dynamic relocs. */ long (*_bfd_canonicalize_dynamic_reloc) (bfd *, arelent **, struct bfd_symbol **); /* Opposite endian version of this target. */ const struct bfd_target * alternative_target; /* Data for use by back-end routines, which isn't generic enough to belong in this structure.
*/ const void *backend_data; } bfd_target; bfd_boolean bfd_set_default_target (const char *name); const bfd_target *bfd_find_target (const char *target_name, bfd *abfd); const char ** bfd_target_list (void); const bfd_target *bfd_search_for_target (int (*search_func) (const bfd_target *, void *), void *); /* Extracted from format.c. */ bfd_boolean bfd_check_format (bfd *abfd, bfd_format format); bfd_boolean bfd_check_format_matches (bfd *abfd, bfd_format format, char ***matching); bfd_boolean bfd_set_format (bfd *abfd, bfd_format format); const char *bfd_format_string (bfd_format format); /* Extracted from linker.c. */ bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec); #define bfd_link_split_section(abfd, sec) \ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec)) void bfd_section_already_linked (bfd *abfd, asection *sec); #define bfd_section_already_linked(abfd, sec) \ BFD_SEND (abfd, _section_already_linked, (abfd, sec)) /* Extracted from simple.c. */ bfd_byte *bfd_simple_get_relocated_section_contents (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table); #ifdef __cplusplus } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/bitops.h000066400000000000000000000270701314037446600243370ustar00rootroot00000000000000#ifndef _I386_BITOPS_H #define _I386_BITOPS_H /* * Copyright 1992, Linus Torvalds. */ #include #include #include /* * These have to be done with inline assembly: that way the bit-setting * is guaranteed to be atomic. All bit operations return 0 if the bit * was cleared before the operation and != 0 if it was not. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ #define ADDR (*(volatile long *) addr) /** * set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * This function is atomic and may not be reordered. See __set_bit() * if you do not require the atomic guarantees. * * Note: there are no guarantees that this function will not be reordered * on non x86 architectures, so if you are writting portable code, * make sure not to rely on its reordering guarantees. * * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void set_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK "btsl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Unlike set_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static inline void __set_bit(int nr, volatile unsigned long * addr) { __asm__( "btsl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. 
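* * (Editor's sketch, not part of the original header: when a bit is used as a simple lock flag, the unlock path is typically written as smp_mb__before_clear_bit(); clear_bit(MY_LOCK_BIT, &flags); so that earlier stores are ordered before the bit is observed clear; MY_LOCK_BIT and flags are hypothetical names.)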
*/ static inline void clear_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK "btrl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } static inline void __clear_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( "btrl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() /** * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * * Unlike change_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static inline void __change_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( "btcl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * * change_bit() is atomic and may not be reordered. It may be * reordered on other architectures than x86. * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static inline void change_bit(int nr, volatile unsigned long * addr) { __asm__ __volatile__( LOCK "btcl %1,%0" :"+m" (ADDR) :"Ir" (nr)); } /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It may be reordered on other architectures than x86. * It also implies a memory barrier. */ static inline int test_and_set_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( LOCK "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__( "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr)); return oldbit; } /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It can be reorderdered on other architectures other than x86. * It also implies a memory barrier. */ static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( LOCK "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { int oldbit; __asm__( "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr)); return oldbit; } /* WARNING: non atomic and it can be reordered! 
*/ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { int oldbit; __asm__ __volatile__( "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } /** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ static inline int test_and_change_bit(int nr, volatile unsigned long* addr) { int oldbit; __asm__ __volatile__( LOCK "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"Ir" (nr) : "memory"); return oldbit; } #if 0 /* Fool kernel-doc since it doesn't do macros yet */ /** * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ static int test_bit(int nr, const volatile void * addr); #endif static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) { return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; } static inline int variable_test_bit(int nr, const volatile unsigned long * addr) { int oldbit; __asm__ __volatile__( "btl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit) :"m" (ADDR),"Ir" (nr)); return oldbit; } #define test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ constant_test_bit((nr),(addr)) : \ variable_test_bit((nr),(addr))) #undef ADDR /** * find_first_zero_bit - find the first zero bit in a memory region * @addr: The address to start the search at * @size: The maximum size to search * * Returns the bit-number of the first zero bit, not the number of the byte * containing a bit. */ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) { int d0, d1, d2; int res; if (!size) return 0; /* This looks at memory. Mark it volatile to tell gcc not to move it around */ __asm__ __volatile__( "movl $-1,%%eax\n\t" "xorl %%edx,%%edx\n\t" "repe; scasl\n\t" "je 1f\n\t" "xorl -4(%%edi),%%eax\n\t" "subl $4,%%edi\n\t" "bsfl %%eax,%%edx\n" "1:\tsubl %%ebx,%%edi\n\t" "shll $3,%%edi\n\t" "addl %%edi,%%edx" :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); return res; } /** * find_next_zero_bit - find the first zero bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The maximum size to search */ int find_next_zero_bit(const unsigned long *addr, int size, int offset); /** * __ffs - find first bit in word. * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ static inline unsigned long __ffs(unsigned long word) { __asm__("bsfl %1,%0" :"=r" (word) :"rm" (word)); return word; } /** * find_first_bit - find the first set bit in a memory region * @addr: The address to start the search at * @size: The maximum size to search * * Returns the bit-number of the first set bit, not the number of the byte * containing a bit. */ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) { unsigned x = 0; while (x < size) { unsigned long val = *addr++; if (val) return __ffs(val) + x; x += (sizeof(*addr)<<3); } return x; } /** * find_next_bit - find the first set bit in a memory region * @addr: The address to base the search on * @offset: The bitnumber to start searching at * @size: The maximum size to search */ int find_next_bit(const unsigned long *addr, int size, int offset); /** * ffz - find first zero in word. 
* @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. */ static inline unsigned long ffz(unsigned long word) { __asm__("bsfl %1,%0" :"=r" (word) :"r" (~word)); return word; } #define fls64(x) generic_fls64(x) #ifdef __KERNEL__ /* * Every architecture must define this function. It's the fastest * way of searching a 140-bit bitmap where the first 100 bits are * unlikely to be set. It's guaranteed that at least one of the 140 * bits is cleared. */ static inline int sched_find_first_bit(const unsigned long *b) { if (unlikely(b[0])) return __ffs(b[0]); if (unlikely(b[1])) return __ffs(b[1]) + 32; if (unlikely(b[2])) return __ffs(b[2]) + 64; if (b[3]) return __ffs(b[3]) + 96; return __ffs(b[4]) + 128; } /** * ffs - find first bit set * @x: the word to search * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ static inline int ffs(int x) { int r; __asm__("bsfl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); return r+1; } /** * fls - find last bit set * @x: the word to search * * This is defined the same way as ffs. */ static inline int fls(int x) { int r; __asm__("bsrl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); return r+1; } /** * hweightN - returns the hamming weight of a N-bit word * @x: the word to weigh * * The Hamming Weight of a number is the total number of bits set in it. */ #define hweight32(x) generic_hweight32(x) #define hweight16(x) generic_hweight16(x) #define hweight8(x) generic_hweight8(x) #endif /* __KERNEL__ */ #ifdef __KERNEL__ #define ext2_set_bit(nr,addr) \ __test_and_set_bit((nr),(unsigned long*)addr) #define ext2_set_bit_atomic(lock,nr,addr) \ test_and_set_bit((nr),(unsigned long*)addr) #define ext2_clear_bit(nr, addr) \ __test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_clear_bit_atomic(lock,nr, addr) \ test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) #define ext2_find_first_zero_bit(addr, size) \ find_first_zero_bit((unsigned long*)addr, size) #define ext2_find_next_zero_bit(addr, size, off) \ find_next_zero_bit((unsigned long*)addr, size, off) /* Bitmap functions for the minix filesystem. */ #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) #define minix_find_first_zero_bit(addr,size) \ find_first_zero_bit((void*)addr,size) #endif /* __KERNEL__ */ #endif /* _I386_BITOPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/boot.h000066400000000000000000000006311314037446600237740ustar00rootroot00000000000000#ifndef _LINUX_BOOT_H #define _LINUX_BOOT_H /* Don't touch these, unless you really know what you're doing. */ #define DEF_INITSEG 0x9000 #define DEF_SYSSEG 0x1000 #define DEF_SETUPSEG 0x9020 #define DEF_SYSSIZE 0x7F00 /* Internal svga startup constants */ #define NORMAL_VGA 0xffff /* 80x25 mode */ #define EXTENDED_VGA 0xfffe /* 80x50 mode */ #define ASK_VGA 0xfffd /* ask for it at bootup */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/bug.h000066400000000000000000000010361314037446600236060ustar00rootroot00000000000000#ifndef _I386_BUG_H #define _I386_BUG_H #include /* * Tell the user there is some problem. 
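/*
 * Illustrative usage sketch (not part of the original bitops.h above): how
 * the atomic and non-atomic bit helpers are typically combined.  The bitmap
 * name and size are invented for the example.
 */
#include <linux/bitops.h>

#define DEMO_NR_FLAGS 64
static unsigned long demo_flags[DEMO_NR_FLAGS / BITS_PER_LONG];

static int demo_claim_flag(void)
{
	int nr;

	/* Atomic test-and-set: a non-zero return means the bit was already set. */
	if (test_and_set_bit(0, demo_flags))
		return -1;

	/* Non-atomic variant: only safe when a lock already serialises access. */
	__set_bit(1, demo_flags);

	/* Scan for a free slot and claim it (the scan itself is not atomic). */
	nr = find_first_zero_bit(demo_flags, DEMO_NR_FLAGS);
	if (nr >= DEMO_NR_FLAGS)
		return -1;			/* bitmap full */
	set_bit(nr, demo_flags);

	return test_bit(nr, demo_flags) ? nr : -1;
}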
* The offending file and line are encoded after the "officially * undefined" opcode for parsing in the trap handler. */ #ifdef CONFIG_BUG #define HAVE_ARCH_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE #define BUG() \ __asm__ __volatile__( "ud2\n" \ "\t.word %c0\n" \ "\t.long %c1\n" \ : : "i" (__LINE__), "i" (__FILE__)) #else #define BUG() __asm__ __volatile__("ud2\n") #endif #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/bugs.h000066400000000000000000000115621314037446600237760ustar00rootroot00000000000000/* * include/asm-i386/bugs.h * * Copyright (C) 1994 Linus Torvalds * * Cyrix stuff, June 1998 by: * - Rafael R. Reilova (moved everything from head.S), * * - Channing Corn (tests & fixes), * - Andrew D. Balsa (code cleanup). */ /* * This is included by init/main.c to check for architecture-dependent bugs. * * Needs: * void check_bugs(void); */ #include #include #include #include #include static int __init no_halt(char *s) { boot_cpu_data.hlt_works_ok = 0; return 1; } __setup("no-hlt", no_halt); static int __init mca_pentium(char *s) { mca_pentium_flag = 1; return 1; } __setup("mca-pentium", mca_pentium); static int __init no_387(char *s) { boot_cpu_data.hard_math = 0; write_cr0(0xE | read_cr0()); return 1; } __setup("no387", no_387); static double __initdata x = 4195835.0; static double __initdata y = 3145727.0; /* * This used to check for exceptions.. * However, it turns out that to support that, * the XMM trap handlers basically had to * be buggy. So let's have a correct XMM trap * handler, and forget about printing out * some status at boot. * * We should really only care about bugs here * anyway. Not features. */ static void __init check_fpu(void) { if (!boot_cpu_data.hard_math) { #ifndef CONFIG_MATH_EMULATION printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); printk(KERN_EMERG "Giving up.\n"); for (;;) ; #endif return; } /* trap_init() enabled FXSR and company _before_ testing for FP problems here. */ /* Test for the divl bug.. */ __asm__("fninit\n\t" "fldl %1\n\t" "fdivl %2\n\t" "fmull %2\n\t" "fldl %1\n\t" "fsubp %%st,%%st(1)\n\t" "fistpl %0\n\t" "fwait\n\t" "fninit" : "=m" (*&boot_cpu_data.fdiv_bug) : "m" (*&x), "m" (*&y)); if (boot_cpu_data.fdiv_bug) printk("Hmm, FPU with FDIV bug.\n"); } static void __init check_hlt(void) { printk(KERN_INFO "Checking 'hlt' instruction... "); if (!boot_cpu_data.hlt_works_ok) { printk("disabled\n"); return; } halt(); halt(); halt(); halt(); printk("OK.\n"); } /* * Most 386 processors have a bug where a POPAD can lock the * machine even from user space. */ static void __init check_popad(void) { #ifndef CONFIG_X86_POPAD_OK int res, inp = (int) &res; printk(KERN_INFO "Checking for popad bug... "); __asm__ __volatile__( "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx " : "=&a" (res) : "d" (inp) : "ecx", "edi" ); /* If this fails, it means that any user program may lock the CPU hard. Too bad. */ if (res != 12345678) printk( "Buggy.\n" ); else printk( "OK.\n" ); #endif } /* * Check whether we are able to run this kernel safely on SMP. * * - In order to run on a i386, we need to be compiled for i386 * (for due to lack of "invlpg" and working WP on a i386) * - In order to run on anything without a TSC, we need to be * compiled for a i486. 
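/*
 * Illustrative sketch (not part of the original bug.h above): BUG() marks a
 * state that must never occur; on i386 it executes the "ud2" opcode so the
 * trap handler can report the encoded file and line.  The structure and
 * magic value below are invented for the example.
 */
struct demo_ctx { unsigned long magic; };
#define DEMO_MAGIC 0xdeadbeefUL

static void demo_validate(struct demo_ctx *ctx)
{
	/* Corrupted state is unrecoverable -- stop here rather than limp on. */
	if (ctx == NULL || ctx->magic != DEMO_MAGIC)
		BUG();
}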
* - In order to support the local APIC on a buggy Pentium machine, * we need to be compiled with CONFIG_X86_GOOD_APIC disabled, * which happens implicitly if compiled for a Pentium or lower * (unless an advanced selection of CPU features is used) as an * otherwise config implies a properly working local APIC without * the need to do extra reads from the APIC. */ static void __init check_config(void) { /* * We'd better not be a i386 if we're configured to use some * i486+ only features! (WP works in supervisor mode and the * new "invlpg" and "bswap" instructions) */ #if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP) if (boot_cpu_data.x86 == 3) panic("Kernel requires i486+ for 'invlpg' and other features"); #endif /* * If we configured ourselves for a TSC, we'd better have one! */ #ifdef CONFIG_X86_TSC if (!cpu_has_tsc) panic("Kernel compiled for Pentium+, requires TSC feature!"); #endif /* * If we were told we had a good local APIC, check for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). */ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && cpu_has_apic && boot_cpu_data.x86 == 5 && boot_cpu_data.x86_model == 2 && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!"); #endif } extern void alternative_instructions(void); static void __init check_bugs(void) { identify_cpu(&boot_cpu_data); #ifndef CONFIG_SMP printk("CPU: "); print_cpu_info(&boot_cpu_data); #endif check_config(); check_fpu(); check_hlt(); check_popad(); system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); alternative_instructions(); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/byteorder.h000066400000000000000000000024571314037446600250400ustar00rootroot00000000000000#ifndef _I386_BYTEORDER_H #define _I386_BYTEORDER_H #include #include #ifdef __GNUC__ /* For avoiding bswap on i386 */ #ifdef __KERNEL__ #include #endif static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) { #ifdef CONFIG_X86_BSWAP __asm__("bswap %0" : "=r" (x) : "0" (x)); #else __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ "rorl $16,%0\n\t" /* swap words */ "xchgb %b0,%h0" /* swap higher bytes */ :"=q" (x) : "0" (x)); #endif return x; } static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) { union { struct { __u32 a,b; } s; __u64 u; } v; v.u = val; #ifdef CONFIG_X86_BSWAP asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #else v.s.a = ___arch__swab32(v.s.a); v.s.b = ___arch__swab32(v.s.b); asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); #endif return v.u; } /* Do not define swab16. Gcc is smart enough to recognize "C" version and convert it into rotation or exhange. 
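/*
 * Illustrative sketch (not part of the original byteorder.h above):
 * ___arch__swab32() reverses byte order, e.g. 0x12345678 -> 0x78563412.
 * Kernel code normally reaches it through the generic swab32()/cpu_to_be32()
 * wrappers rather than calling the arch helper directly.
 */
static void demo_byteswap(void)
{
	__u32 host = 0x12345678;
	__u32 swapped = ___arch__swab32(host);

	BUG_ON(swapped != 0x78563412);			/* bytes reversed */
	BUG_ON(___arch__swab32(swapped) != host);	/* swabbing twice is a no-op */
}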
*/ #define __arch__swab64(x) ___arch__swab64(x) #define __arch__swab32(x) ___arch__swab32(x) #define __BYTEORDER_HAS_U64__ #endif /* __GNUC__ */ #include #endif /* _I386_BYTEORDER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/cache.h000066400000000000000000000003701314037446600240740ustar00rootroot00000000000000/* * include/asm-i386/cache.h */ #ifndef __ARCH_I386_CACHE_H #define __ARCH_I386_CACHE_H #include /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/cacheflush.h000066400000000000000000000024741314037446600251450ustar00rootroot00000000000000#ifndef _I386_CACHEFLUSH_H #define _I386_CACHEFLUSH_H /* Keep includes the same across arches. */ #include /* Caches aren't brain-dead on the intel. */ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_range(start, end) do { } while (0) #define flush_icache_page(vma,pg) do { } while (0) #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) void global_flush_tlb(void); int change_page_attr(struct page *page, int numpages, pgprot_t prot); #ifdef CONFIG_DEBUG_PAGEALLOC /* internal debugging function */ void kernel_map_pages(struct page *page, int numpages, int enable); #endif #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif #endif /* _I386_CACHEFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/checksum.h000066400000000000000000000115341314037446600246370ustar00rootroot00000000000000#ifndef _I386_CHECKSUM_H #define _I386_CHECKSUM_H #include #include /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); /* * the same as csum_partial, but copies from src while it * checksums, and handles user-space pointer exceptions correctly, when needed. * * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst, int len, int sum, int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. * * If you use these functions directly please don't forget the * access_ok(). 
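/*
 * Illustrative sketch (not part of the original checksum.h): csum_partial(),
 * declared above, yields a 32-bit folded-carry sum; the final 16-bit Internet
 * checksum is then obtained with csum_fold(), which is defined a little
 * further down in this header.  The helper name is invented.
 */
static unsigned short demo_payload_csum(const unsigned char *buf, int len)
{
	/* The third argument is the running sum; 0 starts a fresh checksum. */
	unsigned int sum = csum_partial(buf, len, 0);

	/* Fold the 32-bit accumulator to 16 bits and complement it. */
	return csum_fold(sum);
}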
*/ static __inline__ unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len, int sum) { return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); } static __inline__ unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len, int sum, int *err_ptr) { might_sleep(); return csum_partial_copy_generic((__force unsigned char *)src, dst, len, sum, err_ptr, NULL); } /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. * * By Jorge Cwik , adapted for linux by * Arnt Gulbrandsen. */ static inline unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) { unsigned int sum; __asm__ __volatile__( "movl (%1), %0 ;\n" "subl $4, %2 ;\n" "jbe 2f ;\n" "addl 4(%1), %0 ;\n" "adcl 8(%1), %0 ;\n" "adcl 12(%1), %0 ;\n" "1: adcl 16(%1), %0 ;\n" "lea 4(%1), %1 ;\n" "decl %2 ;\n" "jne 1b ;\n" "adcl $0, %0 ;\n" "movl %0, %2 ;\n" "shrl $16, %0 ;\n" "addw %w2, %w0 ;\n" "adcl $0, %0 ;\n" "notl %0 ;\n" "2: ;\n" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ : "=r" (sum), "=r" (iph), "=r" (ihl) : "1" (iph), "2" (ihl) : "memory"); return(sum); } /* * Fold a partial checksum */ static inline unsigned int csum_fold(unsigned int sum) { __asm__( "addl %1, %0 ;\n" "adcl $0xffff, %0 ;\n" : "=r" (sum) : "r" (sum << 16), "0" (sum & 0xffff0000) ); return (~sum) >> 16; } static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) { __asm__( "addl %1, %0 ;\n" "adcl %2, %0 ;\n" "adcl %3, %0 ;\n" "adcl $0, %0 ;\n" : "=r" (sum) : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum)); return sum; } /* * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ static inline unsigned short ip_compute_csum(unsigned char * buff, int len) { return csum_fold (csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, unsigned short proto, unsigned int sum) { __asm__( "addl 0(%1), %0 ;\n" "adcl 4(%1), %0 ;\n" "adcl 8(%1), %0 ;\n" "adcl 12(%1), %0 ;\n" "adcl 0(%2), %0 ;\n" "adcl 4(%2), %0 ;\n" "adcl 8(%2), %0 ;\n" "adcl 12(%2), %0 ;\n" "adcl %3, %0 ;\n" "adcl %4, %0 ;\n" "adcl $0, %0 ;\n" : "=&r" (sum) : "r" (saddr), "r" (daddr), "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); return csum_fold(sum); } /* * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len, int sum, int *err_ptr) { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) return csum_partial_copy_generic(src, (__force unsigned char *)dst, len, sum, NULL, err_ptr); if (len) *err_ptr = -EFAULT; return -1; /* invalid checksum */ } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/cpu.h000066400000000000000000000006421314037446600236220ustar00rootroot00000000000000#ifndef _ASM_I386_CPU_H_ #define 
_ASM_I386_CPU_H_ #include #include #include #include #include #include struct i386_cpu { struct cpu cpu; }; extern int arch_register_cpu(int num); #ifdef CONFIG_HOTPLUG_CPU extern void arch_unregister_cpu(int); #endif DECLARE_PER_CPU(int, cpu_state); #endif /* _ASM_I386_CPU_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/cpufeature.h000066400000000000000000000164261314037446600252050ustar00rootroot00000000000000/* * cpufeature.h * * Defines x86 CPU feature bits */ #ifndef __ASM_I386_CPUFEATURE_H #define __ASM_I386_CPUFEATURE_H #include #define NCAPINTS 9 /* N 32-bit words worth of info */ /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ /* of FPU context), and CR4.OSFXSR available */ #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
*/ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ /* cpu types for specific tunings: */ #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_CID (4*32+10) /* Context ID */ #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ /* * Auxiliary flags: Linux defined - For features scattered in various * CPUID levels like 0x6, 0xA etc */ #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) #define cpu_has_xmm2 
boot_cpu_has(X86_FEATURE_XMM2) #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) #define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) #define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) #define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) #endif /* __ASM_I386_CPUFEATURE_H */ /* * Local Variables: * mode:c * comment-column:42 * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/cputime.h000066400000000000000000000001631314037446600244770ustar00rootroot00000000000000#ifndef __I386_CPUTIME_H #define __I386_CPUTIME_H #include #endif /* __I386_CPUTIME_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/current.h000066400000000000000000000004161314037446600245140ustar00rootroot00000000000000#ifndef _I386_CURRENT_H #define _I386_CURRENT_H #include struct task_struct; static __always_inline struct task_struct * get_current(void) { return current_thread_info()->task; } #define current get_current() #endif /* !(_I386_CURRENT_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/debugreg.h000066400000000000000000000053011314037446600246140ustar00rootroot00000000000000#ifndef _I386_DEBUGREG_H #define _I386_DEBUGREG_H /* Indicate the register numbers for a number of the specific debug registers. Registers 0-3 contain the addresses we wish to trap on */ #define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */ #define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */ #define DR_STATUS 6 /* u_debugreg[DR_STATUS] */ #define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */ /* Define a few things for the status register. We can use this to determine which debugging register was responsible for the trap. The other bits are either reserved or not of interest to us. */ #define DR_TRAP0 (0x1) /* db0 */ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ #define DR_TRAP3 (0x8) /* db3 */ #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ /* Now define a bunch of things for manipulating the control register. The top two bytes of the control register consist of 4 fields of 4 bits - each field corresponds to one of the four debug registers, and indicates what types of access we trap on, and how large the data field is that we are looking at */ #define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */ #define DR_CONTROL_SIZE 4 /* 4 control bits per register */ #define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */ #define DR_RW_WRITE (0x1) #define DR_RW_READ (0x3) #define DR_LEN_1 (0x0) /* Settings for data length to trap on */ #define DR_LEN_2 (0x4) #define DR_LEN_4 (0xC) /* The low byte to the control register determine which registers are enabled. There are 4 fields of two bits. 
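/*
 * Illustrative sketch (not part of the original cpufeature.h above): gating
 * an optional code path on CPUID feature bits.  The function name and return
 * values are invented for the example.
 */
#include <asm/cpufeature.h>

static int demo_pick_copy_routine(void)
{
	/* Test a feature word bit on the boot CPU's capability mask. */
	if (boot_cpu_has(X86_FEATURE_XMM2))
		return 2;			/* e.g. use an SSE2 variant */

	/* The cpu_has_* convenience macros expand to the same test. */
	if (cpu_has_mmx)
		return 1;			/* MMX fallback */

	return 0;				/* plain integer code path */
}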
One bit is "local", meaning that the processor will reset the bit after a task switch and the other is global meaning that we have to explicitly reset the bit. With linux, you can use either one, since we explicitly zero the register when we enter kernel mode. */ #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ #define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */ /* The second byte to the control register has a few special things. We can slow the instruction pipeline for instructions coming via the gdt or the ldt if we want to. I am not sure why this is an advantage */ #define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */ #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/delay.h000066400000000000000000000012641314037446600241320ustar00rootroot00000000000000#ifndef _I386_DELAY_H #define _I386_DELAY_H /* * Copyright (C) 1993 Linus Torvalds * * Delay routines calling functions in arch/i386/lib/delay.c */ extern void __bad_udelay(void); extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); extern void __ndelay(unsigned long nsecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ __udelay(n)) #define ndelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) #endif /* defined(_I386_DELAY_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/desc.h000066400000000000000000000077531314037446600237630ustar00rootroot00000000000000#ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #define CPU_16BIT_STACK_SIZE 1024 #ifndef __ASSEMBLY__ #include #include #include #include extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); struct Xgt_desc_struct { unsigned short size; unsigned long address __attribute__((packed)); unsigned short pad; } __attribute__ ((packed)); extern struct Xgt_desc_struct idt_descr; DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } extern struct desc_struct idt_table[]; extern void set_intr_gate(unsigned int irq, void * addr); static inline void pack_descriptor(__u32 *a, __u32 *b, unsigned long base, unsigned long limit, unsigned char type, unsigned char flags) { *a = ((base & 0xffff) << 16) | (limit & 0xffff); *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | ((type & 0xff) << 8) | ((flags & 0xf) << 12); } static inline void pack_gate(__u32 *a, __u32 *b, unsigned long base, unsigned short seg, unsigned char type, unsigned char flags) { *a = (seg << 16) | (base & 0xffff); *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff); } #define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */ #define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */ #define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */ #define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */ #define 
DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */ #define DESCTYPE_DPL3 0x60 /* DPL-3 */ #define DESCTYPE_S 0x10 /* !system */ #include static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) { __u32 a, b; pack_gate(&a, &b, (unsigned long)addr, seg, type, 0); write_idt_entry(idt_table, gate, a, b); } static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr) { __u32 a, b; pack_descriptor(&a, &b, (unsigned long)addr, offsetof(struct tss_struct, __cacheline_filler) - 1, DESCTYPE_TSS, 0); write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); } static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries) { __u32 a, b; pack_descriptor(&a, &b, (unsigned long)addr, entries * sizeof(struct desc_struct) - 1, DESCTYPE_LDT, 0); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); } #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 ) static inline void clear_LDT(void) { clear_LDT_desc(); } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock(mm_context_t *pc, int cpu) { int count = pc->size; if (likely(!count)) clear_LDT_desc(); else { set_ldt_desc(cpu, pc->ldt, count); load_LDT_desc(); } } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } static inline unsigned long get_desc_base(unsigned long *desc) { unsigned long base; base = ((desc[0] >> 16) & 0x0000ffff) | ((desc[1] << 16) & 0x00ff0000) | (desc[1] & 0xff000000); return base; } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/div64.h000066400000000000000000000022331314037446600237650ustar00rootroot00000000000000#ifndef __I386_DIV64 #define __I386_DIV64 /* * do_div() is NOT a C function. It wants to return * two values (the quotient and the remainder), but * since that doesn't work very well in C, what it * does is: * * - modifies the 64-bit dividend _in_place_ * - returns the 32-bit remainder * * This ends up being the most efficient "calling * convention" on x86. */ #define do_div(n,base) ({ \ unsigned long __upper, __low, __high, __mod, __base; \ __base = (base); \ asm("":"=a" (__low), "=d" (__high):"A" (n)); \ __upper = __high; \ if (__high) { \ __upper = __high % (__base); \ __high = __high / (__base); \ } \ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ asm("":"=A" (n):"a" (__low),"d" (__high)); \ __mod; \ }) /* * (long)X = ((long long)divs) / (long)div * (long)rem = ((long long)divs) % (long)div * * Warning, this will do an exception if X overflows. 
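/*
 * Illustrative sketch (not part of the original div64.h above): do_div()
 * modifies its 64-bit dividend in place and returns the 32-bit remainder,
 * so it must be applied to an lvalue you are willing to overwrite.
 */
static void demo_ns_split(unsigned long long ns)
{
	unsigned long long sec = ns;		/* becomes the quotient below */
	unsigned long rem;

	rem = do_div(sec, 1000000000UL);	/* sec = ns / 1e9, rem = ns % 1e9 */

	/* sec now holds whole seconds, rem the leftover nanoseconds. */
	(void)sec;
	(void)rem;
}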
*/ #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) static inline long div_ll_X_l_rem(long long divs, long div, long *rem) { long dum2; __asm__("divl %2":"=a"(dum2), "=d"(*rem) : "rm"(div), "A"(divs)); return dum2; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/dma-mapping.h000066400000000000000000000101251314037446600252220ustar00rootroot00000000000000#ifndef _ASM_I386_DMA_MAPPING_H #define _ASM_I386_DMA_MAPPING_H #include #include #include #include #include #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); static inline dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) BUG(); WARN_ON(size == 0); flush_write_buffers(); return virt_to_phys(ptr); } static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { if (direction == DMA_NONE) BUG(); } static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { int i; if (direction == DMA_NONE) BUG(); WARN_ON(nents == 0 || sg[0].length == 0); for (i = 0; i < nents; i++ ) { BUG_ON(!sg[i].page); sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; } flush_write_buffers(); return nents; } static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); return page_to_phys(page) + offset; } static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); } static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { flush_write_buffers(); } static inline int dma_mapping_error(dma_addr_t dma_addr) { return 0; } static inline int dma_supported(struct device *dev, u64 mask) { /* * we fall back to GFP_DMA when the mask isn't all 1s, * so we can't guarantee allocations that must be * within a tighter range than GFP_DMA.. 
*/ if(mask < 0x00ffffff) return 0; return 1; } static inline int dma_set_mask(struct device *dev, u64 mask) { if(!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } static inline int dma_get_cache_alignment(void) { /* no easy way to get cache size on all x86, so return the * maximum possible, to be safe */ return (1 << INTERNODE_CACHE_SHIFT); } #define dma_is_consistent(d) (1) static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags); extern void dma_release_declared_memory(struct device *dev); extern void * dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/dma.h000066400000000000000000000231551314037446600236000ustar00rootroot00000000000000/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen * and John Boyd, Nov. 1992. */ #ifndef _ASM_DMA_H #define _ASM_DMA_H #include #include /* And spinlocks */ #include /* need byte IO */ #include #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER #define dma_outb outb_p #else #define dma_outb outb #endif #define dma_inb inb /* * NOTES about DMA transfers: * * controller 1: channels 0-3, byte operations, ports 00-1F * controller 2: channels 4-7, word operations, ports C0-DF * * - ALL registers are 8 bits only, regardless of transfer size * - channel 4 is not used - cascades 1 into 2. * - channels 0-3 are byte - addresses/counts are for physical bytes * - channels 5-7 are word - addresses/counts are for physical words * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries * - transfer count loaded to registers is 1 less than actual count * - controller 2 offsets are all even (2x offsets for controller 1) * - page registers for 5-7 don't use data bit 0, represent 128K pages * - page registers for 0-3 use bit 0, represent 64K pages * * DMA transfers are limited to the lower 16MB of _physical_ memory. * Note that addresses loaded into registers must be _physical_ addresses, * not logical addresses (which may differ if paging is active). * * Address mapping for channels 0-3: * * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) * | ... | | ... | | ... | * | ... | | ... | | ... | * | ... | | ... | | ... | * P7 ... P0 A7 ... A0 A7 ... A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Address mapping for channels 5-7: * * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) * | ... | \ \ ... \ \ \ ... \ \ * | ... | \ \ ... \ \ \ ... \ (not used) * | ... | \ \ ... \ \ \ ... \ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at * the hardware level, so odd-byte transfers aren't possible). * * Transfer count (_not # bytes_) is limited to 64K, represented as actual * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, * and up to 128K bytes may be transferred on channels 5-7 in one operation. 
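/*
 * Illustrative sketch (not part of the original dma-mapping.h above): the
 * usual streaming-DMA map/unmap pairing.  The device pointer and buffer are
 * assumed to come from the caller; real code would also handle completion.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map for device reads (the CPU fills the buffer, the device consumes it). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... hand 'handle' to the device and wait for it to finish ... */

	/* Tear the mapping down once the device no longer touches the buffer. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}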
* */ #define MAX_DMA_CHANNELS 8 /* The maximum address that we can perform a DMA transfer to on this platform */ #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) /* 8237 DMA controllers */ #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ /* DMA controller registers */ #define DMA1_CMD_REG 0x08 /* command register (w) */ #define DMA1_STAT_REG 0x08 /* status register (r) */ #define DMA1_REQ_REG 0x09 /* request register (w) */ #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ #define DMA1_MODE_REG 0x0B /* mode register (w) */ #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ #define DMA2_CMD_REG 0xD0 /* command register (w) */ #define DMA2_STAT_REG 0xD0 /* status register (r) */ #define DMA2_REQ_REG 0xD2 /* request register (w) */ #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ #define DMA2_MODE_REG 0xD6 /* mode register (w) */ #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ #define DMA_ADDR_0 0x00 /* DMA address registers */ #define DMA_ADDR_1 0x02 #define DMA_ADDR_2 0x04 #define DMA_ADDR_3 0x06 #define DMA_ADDR_4 0xC0 #define DMA_ADDR_5 0xC4 #define DMA_ADDR_6 0xC8 #define DMA_ADDR_7 0xCC #define DMA_CNT_0 0x01 /* DMA count registers */ #define DMA_CNT_1 0x03 #define DMA_CNT_2 0x05 #define DMA_CNT_3 0x07 #define DMA_CNT_4 0xC2 #define DMA_CNT_5 0xC6 #define DMA_CNT_6 0xCA #define DMA_CNT_7 0xCE #define DMA_PAGE_0 0x87 /* DMA page registers */ #define DMA_PAGE_1 0x83 #define DMA_PAGE_2 0x81 #define DMA_PAGE_3 0x82 #define DMA_PAGE_5 0x8B #define DMA_PAGE_6 0x89 #define DMA_PAGE_7 0x8A #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ #define DMA_AUTOINIT 0x10 extern spinlock_t dma_spin_lock; static __inline__ unsigned long claim_dma_lock(void) { unsigned long flags; spin_lock_irqsave(&dma_spin_lock, flags); return flags; } static __inline__ void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); } /* enable/disable a specific DMA channel */ static __inline__ void enable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr, DMA1_MASK_REG); else dma_outb(dmanr & 3, DMA2_MASK_REG); } static __inline__ void disable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr | 4, DMA1_MASK_REG); else dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); } /* Clear the 'DMA Pointer Flip Flop'. * Write 0 for LSB/MSB, 1 for MSB/LSB access. * Use this once to initialize the FF to a known state. * After that, keep track of it. :-) * --- In order to do that, the DMA routines below should --- * --- only be used while holding the DMA lock ! 
--- */ static __inline__ void clear_dma_ff(unsigned int dmanr) { if (dmanr<=3) dma_outb(0, DMA1_CLEAR_FF_REG); else dma_outb(0, DMA2_CLEAR_FF_REG); } /* set mode (above) for a specific DMA channel */ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) { if (dmanr<=3) dma_outb(mode | dmanr, DMA1_MODE_REG); else dma_outb(mode | (dmanr&3), DMA2_MODE_REG); } /* Set only the page register bits of the transfer address. * This is used for successive transfers when we know the contents of * the lower 16 bits of the DMA current address register, but a 64k boundary * may have been crossed. */ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) { switch(dmanr) { case 0: dma_outb(pagenr, DMA_PAGE_0); break; case 1: dma_outb(pagenr, DMA_PAGE_1); break; case 2: dma_outb(pagenr, DMA_PAGE_2); break; case 3: dma_outb(pagenr, DMA_PAGE_3); break; case 5: dma_outb(pagenr & 0xfe, DMA_PAGE_5); break; case 6: dma_outb(pagenr & 0xfe, DMA_PAGE_6); break; case 7: dma_outb(pagenr & 0xfe, DMA_PAGE_7); break; } } /* Set transfer address & page bits for specific DMA channel. * Assumes dma flipflop is clear. */ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) { set_dma_page(dmanr, a>>16); if (dmanr <= 3) { dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); } else { dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); } } /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for * a specific DMA channel. * You must ensure the parameters are valid. * NOTE: from a manual: "the number of transfers is one more * than the initial word count"! This is taken into account. * Assumes dma flip-flop is clear. * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. */ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) { count--; if (dmanr <= 3) { dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); } else { dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); } } /* Get DMA residue count. After a DMA transfer, this * should return zero. Reading this while a DMA transfer is * still in progress will return unpredictable results. * If called before the channel has been used, it may return 1. * Otherwise, it returns the number of _bytes_ left to transfer. * * Assumes DMA flip-flop is clear. */ static __inline__ int get_dma_residue(unsigned int dmanr) { unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; /* using short to get 16-bit wrap around */ unsigned short count; count = 1 + dma_inb(io_port); count += dma_inb(io_port) << 8; return (dmanr<=3)? 
count : (count<<1); } /* These are in kernel/dma.c: */ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ extern void free_dma(unsigned int dmanr); /* release it again */ /* From PCI */ #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif #endif /* _ASM_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/dmi.h000066400000000000000000000001051314037446600235760ustar00rootroot00000000000000#ifndef _ASM_DMI_H #define _ASM_DMI_H 1 #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/dump.h000066400000000000000000000051311314037446600237760ustar00rootroot00000000000000/* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * This code is released under version 2 of the GNU GPL. */ /* This header file holds the architecture specific crash dump header */ #ifndef _ASM_DUMP_H #define _ASM_DUMP_H /* necessary header files */ #include #include #include #include /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */ #define platform_timestamp(x) rdtscll(x) /* * Structure: __dump_header_asm * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. */ struct __dump_header_asm { /* the dump magic number -- unique to verify dump is valid */ u64 dha_magic_number; /* the version number of this dump */ u32 dha_version; /* the size of this header (in case we can't read it) */ u32 dha_header_size; /* the esp for i386 systems */ u32 dha_esp; /* the eip for i386 systems */ u32 dha_eip; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ u32 dha_smp_num_cpus; u32 dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; u32 dha_smp_current_task[NR_CPUS]; u32 dha_stack[NR_CPUS]; u32 dha_stack_ptr[NR_CPUS]; } __attribute__((packed)); #ifdef __KERNEL__ extern struct __dump_header_asm dump_header_asm; #ifdef CONFIG_SMP extern cpumask_t irq_affinity[]; extern int (*dump_ipi_function_ptr)(struct pt_regs *); extern void dump_send_ipi(void); #else #define dump_send_ipi() do { } while(0) #endif static inline void get_current_regs(struct pt_regs *regs) { __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx)); __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx)); __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx)); __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi)); __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi)); __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp)); __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax)); __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp)); __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss)); __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs)); __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds)); __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes)); __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags)); regs->eip = (unsigned long)current_text_addr(); } #endif /* __KERNEL__ */ #endif /* _ASM_DUMP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/e820.h000066400000000000000000000023601314037446600235100ustar00rootroot00000000000000/* * structures and definitions for the int 15, ax=e820 memory map * scheme. 
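/*
 * Illustrative sketch (not part of the original dma.h above): the classic
 * 8237 programming sequence using the helpers defined there.  The channel,
 * buffer and length are assumed valid -- the buffer must sit below
 * MAX_DMA_ADDRESS and must not cross a 64K physical boundary.  virt_to_phys()
 * comes from <asm/io.h>.
 */
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/errno.h>

static int demo_start_isa_dma(unsigned int chan, void *buf, unsigned int len)
{
	unsigned long flags;

	if (request_dma(chan, "demo-driver"))	/* reserve the channel */
		return -EBUSY;

	flags = claim_dma_lock();		/* 8237 registers are shared state */
	disable_dma(chan);
	clear_dma_ff(chan);			/* put the flip-flop in a known state */
	set_dma_mode(chan, DMA_MODE_READ);	/* I/O -> memory, single transfers */
	set_dma_addr(chan, virt_to_phys(buf));	/* physical address, low 16MB */
	set_dma_count(chan, len);
	enable_dma(chan);
	release_dma_lock(flags);

	/* ... when the device signals completion, release the channel ... */
	free_dma(chan);
	return 0;
}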
* * In a nutshell, arch/i386/boot/setup.S populates a scratch table * in the empty_zero_block that contains a list of usable address/size * duples. In arch/i386/kernel/setup.c, this information is * transferred into the e820map, and in arch/i386/mm/init.c, that * new information is used to mark pages reserved or not. * */ #ifndef __E820_HEADER #define __E820_HEADER #define E820MAP 0x2d0 /* our map */ #define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 #define E820_RESERVED 2 #define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ #define E820_NVS 4 #define HIGH_MEMORY (1024*1024) #ifndef __ASSEMBLY__ struct e820map { int nr_map; struct e820entry { unsigned long long addr; /* start of memory segment */ unsigned long long size; /* size of memory segment */ unsigned long type; /* type of memory segment */ } map[E820MAX]; }; extern struct e820map e820; extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_any_mapped(u64 start, u64 end, unsigned type); #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/edac.h000066400000000000000000000006471314037446600237340ustar00rootroot00000000000000#ifndef ASM_EDAC_H #define ASM_EDAC_H /* ECC atomic, DMA, SMP and interrupt safe scrub function */ static __inline__ void atomic_scrub(void *va, u32 size) { unsigned long *virt_addr = va; u32 i; for (i = 0; i < size / 4; i++, virt_addr++) /* Very carefully read and write to memory atomically * so we are interrupt, DMA and SMP safe. */ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/elf.h000066400000000000000000000150411314037446600236000ustar00rootroot00000000000000#ifndef __ASMi386_ELF_H #define __ASMi386_ELF_H /* * ELF register definitions.. */ #include #include #include #include /* for savesegment */ #include #include #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 #define R_386_GOT32 3 #define R_386_PLT32 4 #define R_386_COPY 5 #define R_386_GLOB_DAT 6 #define R_386_JMP_SLOT 7 #define R_386_RELATIVE 8 #define R_386_GOTOFF 9 #define R_386_GOTPC 10 #define R_386_NUM 11 typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; typedef struct user_fxsr_struct elf_fpxregset_t; /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(x) \ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) /* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS32 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_386 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have been loaded before the code runs. A value of 0 tells we have no such handler. We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. 
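/*
 * Illustrative sketch (not part of the original e820.h above): walking the
 * BIOS-provided memory map that setup.c copies into the global 'e820'.
 */
#include <asm/e820.h>

static unsigned long long demo_e820_ram_bytes(void)
{
	unsigned long long total = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820.map[i].type == E820_RAM)	/* usable RAM entries only */
			total += e820.map[i].size;

	return total;
}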
*/ #define ELF_PLAT_INIT(_r, load_addr) do { \ _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ _r->eax = 0; \ } while (0) #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different) */ #define ELF_CORE_COPY_REGS(pr_reg, regs) \ pr_reg[0] = regs->ebx; \ pr_reg[1] = regs->ecx; \ pr_reg[2] = regs->edx; \ pr_reg[3] = regs->esi; \ pr_reg[4] = regs->edi; \ pr_reg[5] = regs->ebp; \ pr_reg[6] = regs->eax; \ pr_reg[7] = regs->xds; \ pr_reg[8] = regs->xes; \ savesegment(fs,pr_reg[9]); \ savesegment(gs,pr_reg[10]); \ pr_reg[11] = regs->orig_eax; \ pr_reg[12] = regs->eip; \ pr_reg[13] = regs->xcs; \ pr_reg[14] = regs->eflags; \ pr_reg[15] = regs->esp; \ pr_reg[16] = regs->xss; /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in intent than poking at uname or /proc/cpuinfo. For the moment, we have only optimizations for the Intel generations, but that could change... */ #define ELF_PLATFORM (system_utsname.machine) #ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) do { } while (0) /* * An executable for which elf_read_implies_exec() returns TRUE will * have the READ_IMPLIES_EXEC personality flag set automatically. */ #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) struct task_struct; extern int dump_task_regs (struct task_struct *, elf_gregset_t *); extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *); #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) #ifndef CONFIG_XEN #define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) #else #define VSYSCALL_BASE (PAGE_OFFSET - 2*PAGE_SIZE) #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); #endif #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) #define VSYSCALL_ENTRY ((unsigned long) (VSYSCALL_RELOCATION+&__kernel_vsyscall)) extern void __kernel_vsyscall; #define ARCH_DLINFO \ do { \ NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ } while (0) /* * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the vsyscall DSO contents. Dumping its * contents makes post-mortem fully interpretable later without matching up * the same kernel and hardware config to see what PC values meant. * Dumping its extra ELF program headers includes all the other information * a debugger needs to easily find how the vsyscall DSO was being used. 
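/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * ARCH_DLINFO above exports the vsyscall page to user space through the
 * ELF auxiliary vector (AT_SYSINFO / AT_SYSINFO_EHDR), and the core-dump
 * macros below rely on a full ELF header being mapped at that address.
 * A process can read the same auxv entry back with getauxval(); minimal
 * sketch, assumes glibc and <sys/auxv.h>.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <elf.h>

int main(void)
{
        unsigned long base = getauxval(AT_SYSINFO_EHDR);
        const unsigned char *e = (const unsigned char *)base;

        if (!base) {
                printf("no AT_SYSINFO_EHDR entry in the auxiliary vector\n");
                return 1;
        }
        /* the first bytes are the usual \177ELF magic */
        printf("vDSO/vsyscall ELF header at 0x%lx, magic %02x %c%c%c\n",
               base, e[EI_MAG0], e[EI_MAG1], e[EI_MAG2], e[EI_MAG3]);
        return 0;
}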
*/ #define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) #define ELF_CORE_WRITE_EXTRA_PHDRS \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ (const struct elf_phdr *) (VSYSCALL_BASE \ + VSYSCALL_EHDR->e_phoff); \ int i; \ Elf32_Off ofs = 0; \ for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ struct elf_phdr phdr = vsyscall_phdrs[i]; \ if (phdr.p_type == PT_LOAD) { \ BUG_ON(ofs != 0); \ ofs = phdr.p_offset = offset; \ phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ phdr.p_filesz = phdr.p_memsz; \ offset += phdr.p_filesz; \ } \ else \ phdr.p_offset += ofs; \ phdr.p_paddr = 0; /* match other core phdrs */ \ DUMP_WRITE(&phdr, sizeof(phdr)); \ } \ } while (0) #define ELF_CORE_WRITE_EXTRA_DATA \ do { \ const struct elf_phdr *const vsyscall_phdrs = \ (const struct elf_phdr *) (VSYSCALL_BASE \ + VSYSCALL_EHDR->e_phoff); \ int i; \ for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ if (vsyscall_phdrs[i].p_type == PT_LOAD) \ DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ } \ } while (0) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/emergency-restart.h000066400000000000000000000002271314037446600264720ustar00rootroot00000000000000#ifndef _ASM_EMERGENCY_RESTART_H #define _ASM_EMERGENCY_RESTART_H extern void machine_emergency_restart(void); #endif /* _ASM_EMERGENCY_RESTART_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/errno.h000066400000000000000000000001241314037446600241530ustar00rootroot00000000000000#ifndef _I386_ERRNO_H #define _I386_ERRNO_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/fcntl.h000066400000000000000000000000371314037446600241370ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/fixmap.h000066400000000000000000000116711314037446600243230ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H #include /* used by vmalloc.c, vsyscall.lds.S. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. */ #ifdef CONFIG_RELOCATABLE_FIXMAP extern unsigned long __FIXADDR_TOP; #define VSYSCALL_RELOCATION __fix_to_virt(FIX_VSYSCALL) #else #define __FIXADDR_TOP (0xfffff000) #define VSYSCALL_RELOCATION 0 #endif #ifndef __ASSEMBLY__ #include #include #include #include #ifdef CONFIG_HIGHMEM #include #include #endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. We allocate these special addresses * from the end of virtual memory (0xfffff000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. 
*/ enum fixed_addresses { FIX_HOLE, FIX_VSYSCALL, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif #ifdef CONFIG_X86_F00F_BUG FIX_F00F_IDT, /* Virtual mapping for IDT */ #endif #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ #endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif #ifdef CONFIG_ACPI FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, #endif #ifdef CONFIG_PCI_MMCONFIG FIX_PCIE_MCFG, #endif __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, FIX_WP_TEST, __end_of_fixed_addresses }; /* * Paravirt guests (such as VMI) may need to reserve high memory */ extern void reserve_top_address(unsigned long reserve); extern void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) /* * This is the range that is readable by user mode, and things * acting like user mode such as get_user_pages. */ #define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL)) #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without tranlation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. 
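/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * Sketch of the fixmap index to address arithmetic behind __fix_to_virt()
 * above: slots are handed out downwards from FIXADDR_TOP in PAGE_SIZE
 * steps. DEMO_FIXADDR_TOP and DEMO_PAGE_SHIFT are assumed constants (the
 * non-relocatable i386 values) so the sketch runs stand-alone.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT         12
#define DEMO_FIXADDR_TOP        0xfffff000UL
#define demo_fix_to_virt(idx)   (DEMO_FIXADDR_TOP - ((unsigned long)(idx) << DEMO_PAGE_SHIFT))

int main(void)
{
        unsigned int idx;

        /* index 0 is FIX_HOLE and index 1 is FIX_VSYSCALL in the enum above */
        for (idx = 0; idx < 4; idx++)
                printf("fixmap index %u -> virtual 0x%08lx\n",
                       idx, demo_fix_to_virt(idx));
        return 0;
}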
*/ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) { BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/floppy.h000066400000000000000000000162701314037446600243500ustar00rootroot00000000000000/* * Architecture specific parts of the Floppy driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 */ #ifndef __ASM_I386_FLOPPY_H #define __ASM_I386_FLOPPY_H #include /* * The DMA channel used by the floppy controller cannot access data at * addresses >= 16MB * * Went back to the 1MB limit, as some people had problems with the floppy * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. */ #define _CROSS_64KB(a,s,vdma) \ (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) #define SW fd_routine[use_virtual_dma&1] #define CSW fd_routine[can_use_virtual_dma & 1] #define fd_inb(port) inb_p(port) #define fd_outb(value,port) outb_p(value,port) #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) #define FLOPPY_CAN_FALLBACK_ON_NODMA static int virtual_dma_count; static int virtual_dma_residue; static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) { register unsigned char st; #undef TRACE_FLPY_INT #define NO_FLOPPY_ASSEMBLER #ifdef TRACE_FLPY_INT static int calls=0; static int bytes=0; static int dma_wait=0; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id, regs); #ifdef TRACE_FLPY_INT if(!calls) bytes = virtual_dma_count; #endif #ifndef NO_FLOPPY_ASSEMBLER __asm__ ( "testl %1,%1" "je 3f" "1: inb %w4,%b0" "andb $160,%b0" "cmpb $160,%b0" "jne 2f" "incw %w4" "testl %3,%3" "jne 4f" "inb %w4,%b0" "movb %0,(%2)" "jmp 5f" "4: movb (%2),%0" "outb %b0,%w4" "5: decw %w4" "outb %0,$0x80" "decl %1" "incl %2" "testl %1,%1" "jne 1b" "3: inb %w4,%b0" "2: " : "=a" ((char) st), "=c" ((long) virtual_dma_count), "=S" ((long) virtual_dma_addr) : "b" ((long) virtual_dma_mode), "d" ((short) virtual_dma_port+4), "1" ((long) virtual_dma_count), "2" ((long) virtual_dma_addr)); #else { register int lcount; register char *lptr; st = 1; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { st=inb(virtual_dma_port+4) & 0xa0 ; if(st != 0xa0) break; if(virtual_dma_mode) outb_p(*lptr, virtual_dma_port+5); else *lptr = inb_p(virtual_dma_port+5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; st = inb(virtual_dma_port+4); } #endif #ifdef TRACE_FLPY_INT calls++; #endif if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d 
dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; dma_wait=0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id, regs); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT if(!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; } static void fd_disable_dma(void) { if(! (can_use_virtual_dma & 1)) disable_dma(FLOPPY_DMA); doing_pdma = 0; virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; } static int vdma_request_dma(unsigned int dmanr, const char * device_id) { return 0; } static void vdma_nop(unsigned int dummy) { } static int vdma_get_dma_residue(unsigned int dummy) { return virtual_dma_count + virtual_dma_residue; } static int fd_request_irq(void) { if(can_use_virtual_dma) return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, "floppy", NULL); else return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT|SA_SAMPLE_RANDOM, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) { return __get_dma_pages(GFP_KERNEL,get_order(size)); } static unsigned long vdma_mem_alloc(unsigned long size) { return (unsigned long) vmalloc(size); } #define nodma_mem_alloc(size) vdma_mem_alloc(size) static void _fd_dma_mem_free(unsigned long addr, unsigned long size) { if((unsigned int) addr >= (unsigned int) high_memory) vfree((void *)addr); else free_pages(addr, get_order(size)); } #define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) static void _fd_chose_dma_mode(char *addr, unsigned long size) { if(can_use_virtual_dma == 2) { if((unsigned int) addr >= (unsigned int) high_memory || isa_virt_to_bus(addr) >= 0x1000000 || _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else use_virtual_dma = 0; } else { use_virtual_dma = can_use_virtual_dma & 1; } } #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; return 0; } static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) { #ifdef FLOPPY_SANITY_CHECK if (CROSS_64KB(addr, size)) { printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); return -1; } #endif /* actual, physical DMA */ doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA,mode); set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); set_dma_count(FLOPPY_DMA,size); enable_dma(FLOPPY_DMA); return 0; } static struct fd_routine_l { int (*_request_dma)(unsigned int dmanr, const char * device_id); void (*_free_dma)(unsigned int dmanr); int (*_get_dma_residue)(unsigned int dummy); unsigned long (*_dma_mem_alloc) (unsigned long size); int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { request_dma, free_dma, get_dma_residue, dma_mem_alloc, hard_dma_setup }, { vdma_request_dma, vdma_nop, vdma_get_dma_residue, vdma_mem_alloc, vdma_dma_setup } }; static int FDC1 = 0x3f0; static int FDC2 = -1; /* * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
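/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * The comment above explains that both floppy drive types live in one
 * CMOS byte (offset 0x10): drive 0 in the high nibble, drive 1 in the low
 * nibble, which is exactly what the FLOPPY0_TYPE / FLOPPY1_TYPE macros
 * that follow extract under rtc_lock. Sketch of the same nibble split on
 * an assumed sample byte; pure arithmetic, runnable anywhere.
 */
#include <stdio.h>

int main(void)
{
        unsigned char cmos_0x10 = 0x40; /* hypothetical value: type-4 (1.44M) drive A, no drive B */
        unsigned int drive0 = (cmos_0x10 >> 4) & 15;    /* as FLOPPY0_TYPE */
        unsigned int drive1 = cmos_0x10 & 15;           /* as FLOPPY1_TYPE */

        printf("CMOS 0x10 = 0x%02x: drive0 type %u, drive1 type %u\n",
               cmos_0x10, drive0, drive1);
        return 0;
}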
*/ #define FLOPPY0_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = (CMOS_READ(0x10) >> 4) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define FLOPPY1_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = CMOS_READ(0x10) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define N_FDC 2 #define N_DRIVE 8 #define FLOPPY_MOTOR_MASK 0xf0 #define AUTO_DMA #define EXTRA_FLOPPY_PARAMS #endif /* __ASM_I386_FLOPPY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/futex.h000066400000000000000000000050051314037446600241640ustar00rootroot00000000000000#ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #ifdef __KERNEL__ #include #include #include #include #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: " insn "\n" \ "2: .section .fixup,\"ax\"\n\ 3: mov %3, %1\n\ jmp 2b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .long 1b,3b\n\ .previous" \ : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: movl %2, %0\n\ movl %0, %3\n" \ insn "\n" \ "2: " LOCK "cmpxchgl %3, %2\n\ jnz 1b\n\ 3: .section .fixup,\"ax\"\n\ 4: mov %5, %1\n\ jmp 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .long 1b,4b,2b,4b\n\ .previous" \ : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; inc_preempt_count(); if (op == FUTEX_OP_SET) __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); else { #ifndef CONFIG_X86_BSWAP if (boot_cpu_data.x86 == 3) ret = -ENOSYS; else #endif switch (op) { case FUTEX_OP_ADD: __futex_atomic_op1(LOCK "xaddl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); break; case FUTEX_OP_XOR: __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); break; default: ret = -ENOSYS; } } dec_preempt_count(); if (!ret) { switch (cmp) { case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; default: ret = -ENOSYS; } } return ret; } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/genapic.h000066400000000000000000000065231314037446600244450ustar00rootroot00000000000000#ifndef _ASM_GENAPIC_H #define _ASM_GENAPIC_H 1 /* * Generic APIC driver interface. * * An straight forward mapping of the APIC related parts of the * x86 subarchitecture interface to a dynamic object. * * This is used by the "generic" x86 subarchitecture. * * Copyright 2003 Andi Kleen, SuSE Labs. 
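/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * futex_atomic_op_inuser() in asm-i386/futex.h above packs four fields
 * into one 32-bit encoded_op: 4 bits of operation, 4 bits of comparison,
 * then two sign-extended 12-bit operands. Sketch of the same unpacking on
 * a hypothetical value; the unsigned cast avoids shifting into the sign
 * bit but yields the same result as the header's expressions.
 */
#include <stdio.h>

int main(void)
{
        int encoded_op = (1 << 28) | (5 << 24) | (0x00a << 12) | 0xffd; /* hypothetical */
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (int)((unsigned int)encoded_op << 8) >> 20;   /* bits 12..23, sign-extended */
        int cmparg = (int)((unsigned int)encoded_op << 20) >> 20;  /* bits 0..11, sign-extended */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        return 0;
}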
*/ struct mpc_config_translation; struct mpc_config_bus; struct mp_config_table; struct mpc_config_processor; struct genapic { char *name; int (*probe)(void); int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); int int_delivery_mode; int int_dest_mode; int ESR_DISABLE; int apic_destination_logical; unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); int no_balance_irq; int no_ioapic_check; void (*init_apic_ldr)(void); physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); void (*clustered_apic_check)(void); int (*multi_timer_check)(int apic, int irq); int (*apicid_to_node)(int logical_apicid); int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); int (*mpc_apic_id)(struct mpc_config_processor *m, struct mpc_config_translation *t); void (*setup_portio_remap)(void); int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); void (*enable_apic_mode)(void); u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); /* mpparse */ void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, struct mpc_config_translation *); void (*mpc_oem_pci_bus)(struct mpc_config_bus *, struct mpc_config_translation *); /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. */ int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); /* ipi */ void (*send_IPI_mask)(cpumask_t mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); }; #define APICFUNC(x) .x = x #define APIC_INIT(aname, aprobe) { \ .name = aname, \ .probe = aprobe, \ .int_delivery_mode = INT_DELIVERY_MODE, \ .int_dest_mode = INT_DEST_MODE, \ .no_balance_irq = NO_BALANCE_IRQ, \ .ESR_DISABLE = esr_disable, \ .apic_destination_logical = APIC_DEST_LOGICAL, \ APICFUNC(apic_id_registered), \ APICFUNC(target_cpus), \ APICFUNC(check_apicid_used), \ APICFUNC(check_apicid_present), \ APICFUNC(init_apic_ldr), \ APICFUNC(ioapic_phys_id_map), \ APICFUNC(clustered_apic_check), \ APICFUNC(multi_timer_check), \ APICFUNC(apicid_to_node), \ APICFUNC(cpu_to_logical_apicid), \ APICFUNC(cpu_present_to_apicid), \ APICFUNC(apicid_to_cpu_present), \ APICFUNC(mpc_apic_id), \ APICFUNC(setup_portio_remap), \ APICFUNC(check_phys_apicid_present), \ APICFUNC(mpc_oem_bus_info), \ APICFUNC(mpc_oem_pci_bus), \ APICFUNC(mps_oem_check), \ APICFUNC(get_apic_id), \ .apic_id_mask = APIC_ID_MASK, \ APICFUNC(cpu_mask_to_apicid), \ APICFUNC(acpi_madt_oem_check), \ APICFUNC(send_IPI_mask), \ APICFUNC(send_IPI_allbutself), \ APICFUNC(send_IPI_all), \ APICFUNC(enable_apic_mode), \ APICFUNC(phys_pkg_id), \ } extern struct genapic *genapic; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/hardirq.h000066400000000000000000000011411314037446600244600ustar00rootroot00000000000000#ifndef __ASM_HARDIRQ_H #define __ASM_HARDIRQ_H #include #include #include typedef struct { unsigned int __softirq_pending; unsigned long idle_timestamp; unsigned int __nmi_count; /* arch dependent */ unsigned int apic_timer_irqs; /* arch dependent */ } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU(irq_cpustat_t, irq_stat); extern irq_cpustat_t irq_stat[]; #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) 
(per_cpu(irq_stat, cpu).member) void ack_bad_irq(unsigned int irq); #include #endif /* __ASM_HARDIRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/highmem.h000066400000000000000000000040611314037446600244500ustar00rootroot00000000000000/* * highmem.h: virtual kernel memory mappings for high memory * * Used in CONFIG_HIGHMEM systems for memory pages which * are not addressable by direct kernel virtual addresses. * * Copyright (C) 1999 Gerhard Wichert, Siemens AG * Gerhard.Wichert@pdb.siemens.de * * * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * * Copyright (C) 1999 Ingo Molnar */ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H #ifdef __KERNEL__ #include #include #include #include #include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; extern pte_t *kmap_pte; extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; /* * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. */ #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else #define LAST_PKMAP 1024 #endif /* * Ordering is: * * FIXADDR_TOP * fixed_addresses * FIXADDR_START * temp fixed addresses * FIXADDR_BOOT_START * Persistent kmap area * PKMAP_BASE * VMALLOC_END * Vmalloc area * VMALLOC_START * high_memory */ #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) extern void * FASTCALL(kmap_high(struct page *page)); extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/hpet.h000066400000000000000000000065001314037446600237720ustar00rootroot00000000000000 #ifndef _I386_HPET_H #define _I386_HPET_H #ifdef CONFIG_HPET_TIMER #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Documentation on HPET can be found at: * http://www.intel.com/ial/home/sp/pcmmspec.htm * ftp://download.intel.com/ial/home/sp/mmts098.pdf */ #define HPET_MMAP_SIZE 1024 #define HPET_ID 0x000 #define HPET_PERIOD 0x004 #define HPET_CFG 0x010 #define HPET_STATUS 0x020 #define HPET_COUNTER 0x0f0 #define HPET_T0_CFG 0x100 #define HPET_T0_CMP 0x108 #define HPET_T0_ROUTE 0x110 #define HPET_T1_CFG 0x120 #define HPET_T1_CMP 0x128 #define HPET_T1_ROUTE 0x130 #define HPET_T2_CFG 0x140 #define HPET_T2_CMP 0x148 #define HPET_T2_ROUTE 0x150 #define HPET_ID_LEGSUP 0x00008000 #define HPET_ID_NUMBER 0x00001f00 #define HPET_ID_REV 0x000000ff #define HPET_ID_NUMBER_SHIFT 8 #define HPET_CFG_ENABLE 0x001 #define HPET_CFG_LEGACY 0x002 #define HPET_LEGACY_8254 2 #define HPET_LEGACY_RTC 8 #define HPET_TN_ENABLE 0x004 #define HPET_TN_PERIODIC 0x008 #define HPET_TN_PERIODIC_CAP 0x010 #define HPET_TN_SETVAL 0x040 #define HPET_TN_32BIT 0x100 /* Use our own asm for 
64 bit multiply/divide */ #define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \ __asm__ __volatile__("mull %2" \ :"=a" (eax_out), "=d" (edx_out) \ :"r" (reg_in), "0" (eax_in)) #define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \ __asm__ __volatile__("divl %2" \ :"=a" (eax_out), "=d" (edx_out) \ :"r" (reg_in), "0" (eax_in), "1" (edx_in)) #define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */ /* Max HPET Period is 10^8 femto sec as in HPET spec */ #define HPET_MAX_PERIOD (100000000UL) /* * Min HPET period is 10^5 femto sec just for safety. If it is less than this, * then 32 bit HPET counter wrapsaround in less than 0.5 sec. */ #define HPET_MIN_PERIOD (100000UL) #define HPET_TICK_RATE (HZ * 100000UL) extern unsigned long hpet_tick; /* hpet clks count per tick */ extern unsigned long hpet_address; /* hpet memory map physical address */ extern int hpet_use_timer; extern int hpet_rtc_timer_init(void); extern int hpet_enable(void); extern int hpet_reenable(void); extern int is_hpet_enabled(void); extern int is_hpet_legacy_int_enabled(void); extern int is_hpet_capable(void); extern int hpet_readl(unsigned long a); extern int oem_force_hpet_timer(void); #ifdef CONFIG_HPET_EMULATE_RTC extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); extern int hpet_set_periodic_freq(unsigned long freq); extern int hpet_rtc_dropped_irq(void); extern int hpet_rtc_timer_init(void); extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs); #endif /* CONFIG_HPET_EMULATE_RTC */ #endif /* CONFIG_HPET_TIMER */ #endif /* _I386_HPET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/hw_irq.h000066400000000000000000000041171314037446600243250ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old arch/i386/kernel/irq.h to here. 
VY * * IRQ/IPI changes taken from work by Thomas Radke * */ #include #include #include #include #include struct hw_interrupt_type; #define NMI_VECTOR 0x02 /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 extern void (*interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP fastcall void reschedule_interrupt(void); fastcall void invalidate_interrupt(void); fastcall void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC fastcall void apic_timer_interrupt(void); fastcall void error_interrupt(void); fastcall void spurious_interrupt(void); fastcall void thermal_interrupt(struct pt_regs *); #define platform_legacy_irq(irq) ((irq) < 16) #endif void disable_8259A_irq(unsigned int irq); void enable_8259A_irq(unsigned int irq); int i8259A_irq_pending(unsigned int irq); void make_8259A_irq(unsigned int irq); void init_8259A(int aeoi); void FASTCALL(send_IPI_self(int vector)); void init_VISWS_APIC_irqs(void); void setup_IO_APIC(void); void disable_IO_APIC(void); void print_IO_APIC(void); int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); void send_IPI(int dest, int vector); void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #if defined(CONFIG_X86_IO_APIC) static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { if (IO_APIC_IRQ(i)) send_IPI_self(IO_APIC_VECTOR(i)); } #else static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif #endif /* _ASM_HW_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/i387.h000066400000000000000000000072761314037446600235370ustar00rootroot00000000000000/* * include/asm-i386/i387.h * * Copyright (C) 1994 Linus Torvalds * * Pentium III FXSR, SSE support * General FPU state handling cleanups * Gareth Hughes , May 2000 */ #ifndef __ASM_I386_I387_H #define __ASM_I386_I387_H #include #include #include #include #include #include extern void mxcsr_feature_mask_init(void); extern void init_fpu(struct task_struct *); /* * FPU lazy state save handling... */ /* * The "nop" is needed to make the instructions the same * length. */ #define restore_fpu(tsk) \ alternative_input( \ "nop ; frstor %1", \ "fxrstor %1", \ X86_FEATURE_FXSR, \ "m" ((tsk)->thread.i387.fxsave)) extern void kernel_fpu_begin(void); #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) /* We need a safe address that is cheap to find and that is already in L1 during context switch. The best choices are unfortunately different for UP and SMP */ #ifdef CONFIG_SMP #define safe_address (__per_cpu_offset[0]) #else #define safe_address (kstat_cpu(0).cpustat.user) #endif /* * These must be called with preempt disabled */ static inline void __save_init_fpu( struct task_struct *tsk ) { /* Use more nops than strictly needed in case the compiler varies code */ alternative_input( "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, "fxsave %[fx]\n" "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", X86_FEATURE_FXSR, [fx] "m" (tsk->thread.i387.fxsave), [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. 
safe_address is a random variable that should be in L1 */ alternative_input( GENERIC_NOP8 GENERIC_NOP2, "emms\n\t" /* clear stack tags */ "fildl %[addr]", /* set F?P to defined value */ X86_FEATURE_FXSAVE_LEAK, [addr] "m" (safe_address)); task_thread_info(tsk)->status &= ~TS_USEDFPU; } #define __unlazy_fpu( tsk ) do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) \ save_init_fpu( tsk ); \ } while (0) #define __clear_fpu( tsk ) \ do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) { \ asm volatile("fnclex ; fwait"); \ task_thread_info(tsk)->status &= ~TS_USEDFPU; \ stts(); \ } \ } while (0) /* * These disable preemption on their own and are safe */ static inline void save_init_fpu( struct task_struct *tsk ) { preempt_disable(); __save_init_fpu(tsk); stts(); preempt_enable(); } #define unlazy_fpu( tsk ) do { \ preempt_disable(); \ __unlazy_fpu(tsk); \ preempt_enable(); \ } while (0) #define clear_fpu( tsk ) do { \ preempt_disable(); \ __clear_fpu( tsk ); \ preempt_enable(); \ } while (0) \ /* * FPU state interaction... */ extern unsigned short get_fpu_cwd( struct task_struct *tsk ); extern unsigned short get_fpu_swd( struct task_struct *tsk ); extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); /* * Signal frame handlers... */ extern int save_i387( struct _fpstate __user *buf ); extern int restore_i387( struct _fpstate __user *buf ); /* * ptrace request handers... */ extern int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk ); extern int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf ); extern int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk ); extern int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf ); /* * FPU state for core dumps... */ extern int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu ); #endif /* __ASM_I386_I387_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/i8253.h000066400000000000000000000001551314037446600236040ustar00rootroot00000000000000#ifndef __ASM_I8253_H__ #define __ASM_I8253_H__ extern spinlock_t i8253_lock; #endif /* __ASM_I8253_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/i8259.h000066400000000000000000000007711314037446600236160ustar00rootroot00000000000000#ifndef __ASM_I8259_H__ #define __ASM_I8259_H__ extern unsigned int cached_irq_mask; #define __byte(x,y) (((unsigned char *) &(y))[x]) #define cached_master_mask (__byte(0, cached_irq_mask)) #define cached_slave_mask (__byte(1, cached_irq_mask)) extern spinlock_t i8259A_lock; extern void init_8259A(int auto_eoi); extern void enable_8259A_irq(unsigned int irq); extern void disable_8259A_irq(unsigned int irq); extern unsigned int startup_8259A_irq(unsigned int irq); #endif /* __ASM_I8259_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ide.h000066400000000000000000000030521314037446600235720ustar00rootroot00000000000000/* * linux/include/asm-i386/ide.h * * Copyright (C) 1994-1996 Linus Torvalds & authors */ /* * This file contains the i386 architecture specific IDE code. 
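/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * asm-i386/i387.h above exposes the FPU state through helpers such as
 * get_fpu_cwd(). From user space the same control word can be read
 * directly with the fnstcw instruction; minimal sketch, assumes an x86
 * target and a GCC-style compiler for the inline asm. The value printed
 * is normally the Linux user default 0x037f.
 */
#include <stdio.h>

int main(void)
{
        unsigned short cw = 0;

        __asm__ __volatile__("fnstcw %0" : "=m" (cw));
        printf("x87 control word: 0x%04x\n", cw);
        return 0;
}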
*/ #ifndef __ASMi386_IDE_H #define __ASMi386_IDE_H #ifdef __KERNEL__ #include #ifndef MAX_HWIFS # ifdef CONFIG_BLK_DEV_IDEPCI #define MAX_HWIFS 10 # else #define MAX_HWIFS 6 # endif #endif #define IDE_ARCH_OBSOLETE_DEFAULTS static __inline__ int ide_default_irq(unsigned long base) { switch (base) { case 0x1f0: return 14; case 0x170: return 15; case 0x1e8: return 11; case 0x168: return 10; case 0x1e0: return 8; case 0x160: return 12; default: return 0; } } static __inline__ unsigned long ide_default_io_base(int index) { /* * If PCI is present then it is not safe to poke around * the other legacy IDE ports. Only 0x1f0 and 0x170 are * defined compatibility mode ports for PCI. A user can * override this using ide= but we must default safe. */ if (pci_find_device(PCI_ANY_ID, PCI_ANY_ID, NULL) == NULL) { switch(index) { case 2: return 0x1e8; case 3: return 0x168; case 4: return 0x1e0; case 5: return 0x160; } } switch (index) { case 0: return 0x1f0; case 1: return 0x170; default: return 0; } } #define IDE_ARCH_OBSOLETE_INIT #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ #ifdef CONFIG_BLK_DEV_IDEPCI #define ide_init_default_irq(base) (0) #else #define ide_init_default_irq(base) ide_default_irq(base) #endif #include #endif /* __KERNEL__ */ #endif /* __ASMi386_IDE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/io.h000066400000000000000000000256671314037446600234600ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include #include #include #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). * * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. */ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define IO_SPACE_LIMIT 0xffff #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ #ifdef __KERNEL__ #include #include /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p /** * virt_to_phys - map virtual addresses to physical * @address: address to remap * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. 
In * almost all conceivable cases a device driver should not be using * this function */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } /** * phys_to_virt - map physical address to virtual * @address: address to remap * * The returned virtual address is a current CPU mapping for * the memory address given. It is only valid to use this function on * addresses that have a kernel mapping * * This function does not handle bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline void * phys_to_virt(unsigned long address) { return __va(address); } /* * Change "struct page" to physical address. */ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * ioremap performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. */ static inline void __iomem * ioremap(unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* * bt_ioremap() and bt_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. * A boot-time mapping is currently limited to at most 16 pages. */ extern void *bt_ioremap(unsigned long offset, unsigned long size); extern void bt_iounmap(void *addr, unsigned long size); /* Use early IO mappings for DMI because it's initialized early */ #define dmi_ioremap bt_ioremap #define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus virt_to_phys #define isa_page_to_bus page_to_phys #define isa_bus_to_virt phys_to_virt /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. 
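/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * virt_to_phys()/phys_to_virt() above are kernel-only conversions for the
 * direct mapping. As a loose user-space analogue, this sketch looks up
 * the physical frame backing one of its own pages via /proc/self/pagemap
 * (bits 0..54 hold the PFN, bit 63 the present flag). On modern kernels
 * the PFN reads back as 0 without CAP_SYS_ADMIN; error handling is
 * deliberately minimal.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        unsigned char *buf = malloc(psize);
        uint64_t entry = 0;
        int fd;

        if (!buf)
                return 1;
        buf[0] = 1;                             /* touch the page so it is mapped */
        fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
                return 1;
        /* one little-endian 64-bit entry per virtual page */
        pread(fd, &entry, sizeof(entry),
              (off_t)((uintptr_t)buf / psize) * sizeof(entry));
        if (entry >> 63)                        /* page present flag */
                printf("virt %p -> phys 0x%llx\n", (void *)buf,
                       (unsigned long long)((entry & ((1ULL << 55) - 1)) * psize
                                            + (uintptr_t)buf % psize));
        close(fd);
        free(buf);
        return 0;
}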
*/ static inline unsigned char readb(const volatile void __iomem *addr) { return *(volatile unsigned char __force *) addr; } static inline unsigned short readw(const volatile void __iomem *addr) { return *(volatile unsigned short __force *) addr; } static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl static inline void writeb(unsigned char b, volatile void __iomem *addr) { *(volatile unsigned char __force *) addr = b; } static inline void writew(unsigned short b, volatile void __iomem *addr) { *(volatile unsigned short __force *) addr = b; } static inline void writel(unsigned int b, volatile void __iomem *addr) { *(volatile unsigned int __force *) addr = b; } #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define mmiowb() static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { memset((void __force *) addr, val, count); } static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) { __memcpy(dst, (void __force *) src, count); } static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) { __memcpy((void __force *) dst, src, count); } /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) #define isa_readb(a) readb(__ISA_IO_base + (a)) #define isa_readw(a) readw(__ISA_IO_base + (a)) #define isa_readl(a) readl(__ISA_IO_base + (a)) #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) /* * Again, i386 does not require mem IO specific function. */ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(volatile void __iomem * io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* * Cache management * * This needed for two cases * 1. Out of order aware processors * 2. 
Accidentally out of order processors (PPro errata #51) */ #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); } #define dma_cache_inv(_start,_size) flush_write_buffers() #define dma_cache_wback(_start,_size) flush_write_buffers() #define dma_cache_wback_inv(_start,_size) flush_write_buffers() #else /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() #endif #endif /* __KERNEL__ */ static inline void slow_down_io(void) { __SLOW_DOWN_IO; #ifdef REALLY_SLOW_IO __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; #endif } #ifdef CONFIG_X86_NUMAQ extern void *xquad_portio; /* Where the IO area was mapped */ #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ if (xquad_portio) \ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ else \ out##bwl##_local(value, port); \ } \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_quad(value, port, 0); \ } \ static inline unsigned type in##bwl##_quad(int port, int quad) { \ if (xquad_portio) \ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ else \ return in##bwl##_local(port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_quad(port, 0); \ } #else #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_local(value, port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_local(port); \ } #endif #define BUILDIO(bwl,bw,type) \ static inline void out##bwl##_local(unsigned type value, int port) { \ __BUILDOUTINST(bwl,bw,value,port); \ } \ static inline unsigned type in##bwl##_local(int port) { \ unsigned type value; \ __BUILDININST(bwl,bw,value,port); \ return value; \ } \ static inline void out##bwl##_local_p(unsigned type value, int port) { \ out##bwl##_local(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_local_p(int port) { \ unsigned type value = in##bwl##_local(port); \ slow_down_io(); \ return value; \ } \ __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_p(unsigned type value, int port) { \ out##bwl(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_p(int port) { \ unsigned type value = in##bwl(port); \ slow_down_io(); \ return value; \ } \ static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ __BUILDOUTSINST(bwl,addr,count,port); \ } \ static inline void ins##bwl(int port, void *addr, unsigned long count) { \ __BUILDINSINST(bwl,addr,count,port); \ } BUILDIO(b,b,char) BUILDIO(w,w,short) BUILDIO(l,,int) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/io_apic.h000066400000000000000000000142421314037446600244370ustar00rootroot00000000000000#ifndef __ASM_IO_APIC_H #define __ASM_IO_APIC_H #include #include #include /* * Intel IO-APIC support for SMP and UP systems. 
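/*
 * [Editor's illustrative aside, not part of the archived header files.]
 * check_signature() in asm-i386/io.h above is just a byte-wise compare of
 * an ioremap()ed region against an expected BIOS signature. The same loop
 * is shown here over an ordinary buffer so it can run in user space; the
 * sample "ROM" contents and signature are made up.
 */
#include <stdio.h>

static int demo_check_signature(const unsigned char *io,
                                const unsigned char *sig, int length)
{
        while (length--)
                if (*io++ != *sig++)
                        return 0;       /* mismatch */
        return 1;                       /* all bytes matched */
}

int main(void)
{
        const unsigned char rom[] = { 0x55, 0xaa, 'D', 'E', 'M', 'O' };
        const unsigned char sig[] = { 'D', 'E', 'M', 'O' };

        printf("signature %sfound\n",
               demo_check_signature(rom + 2, sig, sizeof(sig)) ? "" : "not ");
        return 0;
}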
* * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar */ #ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_PCI_MSI static inline int use_pci_vector(void) {return 1;} static inline void disable_edge_ioapic_vector(unsigned int vector) { } static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { } static inline void end_edge_ioapic_vector (unsigned int vector) { } #define startup_level_ioapic startup_level_ioapic_vector #define shutdown_level_ioapic mask_IO_APIC_vector #define enable_level_ioapic unmask_IO_APIC_vector #define disable_level_ioapic mask_IO_APIC_vector #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector #define end_level_ioapic end_level_ioapic_vector #define set_ioapic_affinity set_ioapic_affinity_vector #define startup_edge_ioapic startup_edge_ioapic_vector #define shutdown_edge_ioapic disable_edge_ioapic_vector #define enable_edge_ioapic unmask_IO_APIC_vector #define disable_edge_ioapic disable_edge_ioapic_vector #define ack_edge_ioapic ack_edge_ioapic_vector #define end_edge_ioapic end_edge_ioapic_vector #else static inline int use_pci_vector(void) {return 0;} static inline void disable_edge_ioapic_irq(unsigned int irq) { } static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { } static inline void end_edge_ioapic_irq (unsigned int irq) { } #define startup_level_ioapic startup_level_ioapic_irq #define shutdown_level_ioapic mask_IO_APIC_irq #define enable_level_ioapic unmask_IO_APIC_irq #define disable_level_ioapic mask_IO_APIC_irq #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq #define end_level_ioapic end_level_ioapic_irq #define set_ioapic_affinity set_ioapic_affinity_irq #define startup_edge_ioapic startup_edge_ioapic_irq #define shutdown_edge_ioapic disable_edge_ioapic_irq #define enable_edge_ioapic unmask_IO_APIC_irq #define disable_edge_ioapic disable_edge_ioapic_irq #define ack_edge_ioapic ack_edge_ioapic_irq #define end_edge_ioapic end_edge_ioapic_irq #endif #ifndef CONFIG_XEN #define IO_APIC_BASE(idx) \ ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) #else #include #include #endif /* * The structure of the IO-APIC: */ union IO_APIC_reg_00 { u32 raw; struct { u32 __reserved_2 : 14, LTS : 1, delivery_type : 1, __reserved_1 : 8, ID : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_01 { u32 raw; struct { u32 version : 8, __reserved_2 : 7, PRQ : 1, entries : 8, __reserved_1 : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_02 { u32 raw; struct { u32 __reserved_2 : 24, arbitration : 4, __reserved_1 : 4; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_03 { u32 raw; struct { u32 boot_DT : 1, __reserved_1 : 31; } __attribute__ ((packed)) bits; }; /* * # of IO-APICs and # of IRQ routing registers */ extern int nr_ioapics; extern int nr_ioapic_registers[MAX_IO_APICS]; enum ioapic_irq_destination_types { dest_Fixed = 0, dest_LowestPrio = 1, dest_SMI = 2, dest__reserved_1 = 3, dest_NMI = 4, dest_INIT = 5, dest__reserved_2 = 6, dest_ExtINT = 7 }; struct IO_APIC_route_entry { __u32 vector : 8, delivery_mode : 3, /* 000: FIXED * 001: lowest prio * 111: ExtINT */ dest_mode : 1, /* 0: physical, 1: logical */ delivery_status : 1, polarity : 1, irr : 1, trigger : 1, /* 0: edge, 1: level */ mask : 1, /* 0: enabled, 1: disabled */ __reserved_2 : 15; union { struct { __u32 __reserved_1 : 24, physical_dest : 4, __reserved_2 : 4; } physical; struct { __u32 __reserved_1 : 24, logical_dest : 8; } logical; } dest; } __attribute__ ((packed)); /* * MP-BIOS irq 
configuration table structures: */ /* I/O APIC entries */ extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; /* # of MP IRQ source entries */ extern int mp_irq_entries; /* MP IRQ source entries */ extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* non-0 if default (table-less) MP configuration */ extern int mpc_default_type; static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) { #ifndef CONFIG_XEN *IO_APIC_BASE(apic) = reg; return *(IO_APIC_BASE(apic)+4); #else struct physdev_apic apic_op; int ret; apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; apic_op.reg = reg; ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); if (ret) return ret; return apic_op.value; #endif } static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) { #ifndef CONFIG_XEN *IO_APIC_BASE(apic) = reg; *(IO_APIC_BASE(apic)+4) = value; #else struct physdev_apic apic_op; apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; apic_op.reg = reg; apic_op.value = value; WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op)); #endif } /* * Re-write a value: to be used for read-modify-write * cycles where the read already set up the index register. * * Older SiS APIC requires we rewrite the index regiser */ extern int sis_apic_bug; #ifndef CONFIG_XEN static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) { if (sis_apic_bug) *IO_APIC_BASE(apic) = reg; *(IO_APIC_BASE(apic)+4) = value; } #else #define io_apic_modify io_apic_write #endif /* 1 if "noapic" boot option passed */ extern int skip_ioapic_setup; /* * If we use the IO-APIC for IRQ routing, disable automatic * assignment of PCI IRQ's. */ #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) #ifdef CONFIG_ACPI extern int io_apic_get_unique_id (int ioapic, int apic_id); extern int io_apic_get_version (int ioapic); extern int io_apic_get_redir_entries (int ioapic); extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); #endif /* CONFIG_ACPI */ extern int (*ioapic_renumber_irq)(int ioapic, int irq); #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 #endif extern int assign_irq_vector(int irq); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ioctl.h000066400000000000000000000000371314037446600241430ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ioctls.h000066400000000000000000000051711314037446600243320ustar00rootroot00000000000000#ifndef __ARCH_I386_IOCTLS_H__ #define __ARCH_I386_IOCTLS_H__ #include /* 0x54 is just a magic number to make these relatively unique ('T') */ #define TCGETS 0x5401 #define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ #define TCSETSW 0x5403 #define TCSETSF 0x5404 #define TCGETA 0x5405 #define TCSETA 0x5406 #define TCSETAW 0x5407 #define TCSETAF 0x5408 #define TCSBRK 0x5409 #define TCXONC 0x540A #define TCFLSH 0x540B #define TIOCEXCL 0x540C #define TIOCNXCL 0x540D #define TIOCSCTTY 0x540E #define TIOCGPGRP 0x540F #define TIOCSPGRP 0x5410 #define TIOCOUTQ 0x5411 #define TIOCSTI 0x5412 #define TIOCGWINSZ 0x5413 #define TIOCSWINSZ 0x5414 #define TIOCMGET 0x5415 #define TIOCMBIS 0x5416 #define TIOCMBIC 0x5417 #define TIOCMSET 0x5418 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A #define FIONREAD 0x541B #define TIOCINQ FIONREAD #define TIOCLINUX 0x541C #define TIOCCONS 0x541D #define TIOCGSERIAL 0x541E #define TIOCSSERIAL 0x541F #define 
TIOCPKT 0x5420 #define FIONBIO 0x5421 #define TIOCNOTTY 0x5422 #define TIOCSETD 0x5423 #define TIOCGETD 0x5424 #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ /* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 #define FIOCLEX 0x5451 #define FIOASYNC 0x5452 #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 #define TIOCSERSWILD 0x5455 #define TIOCGLCKTRMIOS 0x5456 #define TIOCSLCKTRMIOS 0x5457 #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ #define TIOCSERGETLSR 0x5459 /* Get line status register */ #define TIOCSERGETMULTI 0x545A /* Get multiport config */ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 #define TIOCPKT_FLUSHREAD 1 #define TIOCPKT_FLUSHWRITE 2 #define TIOCPKT_STOP 4 #define TIOCPKT_START 8 #define TIOCPKT_NOSTOP 16 #define TIOCPKT_DOSTOP 32 #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ipc.h000066400000000000000000000000351314037446600236020ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ipcbuf.h000066400000000000000000000011641314037446600243030ustar00rootroot00000000000000#ifndef __i386_IPCBUF_H__ #define __i386_IPCBUF_H__ /* * The ipc64_perm structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 32-bit mode_t and seq * - 2 miscellaneous 32-bit values */ struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; __kernel_uid32_t cuid; __kernel_gid32_t cgid; __kernel_mode_t mode; unsigned short __pad1; unsigned short seq; unsigned short __pad2; unsigned long __unused1; unsigned long __unused2; }; #endif /* __i386_IPCBUF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/irq.h000066400000000000000000000017471314037446600236350ustar00rootroot00000000000000#ifndef _ASM_IRQ_H #define _ASM_IRQ_H /* * linux/include/asm/irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * IRQ/IPI changes taken from work by Thomas Radke * */ #include #include /* include comes from machine specific directory */ #include "irq_vectors.h" #include static __inline__ int irq_canonicalize(int irq) { return ((irq == 2) ? 
9 : irq); } #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ #endif #ifdef CONFIG_4KSTACKS extern void irq_ctx_init(int cpu); extern void irq_ctx_exit(int cpu); # define __ARCH_HAS_DO_SOFTIRQ #else # define irq_ctx_init(cpu) do { } while (0) # define irq_ctx_exit(cpu) do { } while (0) #endif #ifdef CONFIG_IRQBALANCE extern int irqbalance_disable(char *str); #endif #ifdef CONFIG_HOTPLUG_CPU extern void fixup_irqs(cpumask_t map); #endif #endif /* _ASM_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ist.h000066400000000000000000000015351314037446600236340ustar00rootroot00000000000000#ifndef _ASM_IST_H #define _ASM_IST_H /* * Include file for the interface to IST BIOS * Copyright 2002 Andy Grover * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #ifdef __KERNEL__ struct ist_info { unsigned long signature; unsigned long command; unsigned long event; unsigned long perf_level; }; extern struct ist_info ist_info; #endif /* __KERNEL__ */ #endif /* _ASM_IST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kdb.h000066400000000000000000000022541314037446600235740ustar00rootroot00000000000000#ifndef _ASM_KDB_H #define _ASM_KDB_H /* * Kernel Debugger Architecture Dependent Global Headers * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. */ /* * KDB_ENTER() is a macro which causes entry into the kernel * debugger from any point in the kernel code stream. If it * is intended to be used from interrupt level, it must use * a non-maskable entry method. */ #define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("\tint $129\n"); }} while(0) /* * Needed for exported symbols. */ typedef unsigned long kdb_machreg_t; #define kdb_machreg_fmt "0x%lx" #define kdb_machreg_fmt0 "0x%08lx" #define kdb_bfd_vma_fmt "0x%lx" #define kdb_bfd_vma_fmt0 "0x%08lx" #define kdb_elfw_addr_fmt "0x%x" #define kdb_elfw_addr_fmt0 "0x%08x" /* * Per cpu arch specific kdb state. Must be in range 0xff000000. */ #define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */ static inline unsigned long kdba_funcptr_value(void *fp) { return (unsigned long)fp; } #endif /* !_ASM_KDB_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kdbprivate.h000066400000000000000000000126311314037446600251670ustar00rootroot00000000000000#ifndef _ASM_KDBPRIVATE_H #define _ASM_KDBPRIVATE_H /* * Kernel Debugger Architecture Dependent Private Headers * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. */ typedef unsigned char kdb_machinst_t; /* * KDB_MAXBPT describes the total number of breakpoints * supported by this architecure. 
*/ #define KDB_MAXBPT 16 /* * KDB_MAXHARDBPT describes the total number of hardware * breakpoint registers that exist. */ #define KDB_MAXHARDBPT 4 /* Maximum number of arguments to a function */ #define KDBA_MAXARGS 16 /* * Platform specific environment entries */ #define KDB_PLATFORM_ENV "IDMODE=x86", "BYTESPERWORD=4", "IDCOUNT=16" /* * Support for ia32 debug registers */ typedef struct _kdbhard_bp { kdb_machreg_t bph_reg; /* Register this breakpoint uses */ unsigned int bph_free:1; /* Register available for use */ unsigned int bph_data:1; /* Data Access breakpoint */ unsigned int bph_write:1; /* Write Data breakpoint */ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */ } kdbhard_bp_t; #define IA32_BREAKPOINT_INSTRUCTION 0xcc #define DR6_BT 0x00008000 #define DR6_BS 0x00004000 #define DR6_BD 0x00002000 #define DR6_B3 0x00000008 #define DR6_B2 0x00000004 #define DR6_B1 0x00000002 #define DR6_B0 0x00000001 #define DR6_DR_MASK 0x0000000F #define DR7_RW_VAL(dr, drnum) \ (((dr) >> (16 + (4 * (drnum)))) & 0x3) #define DR7_RW_SET(dr, drnum, rw) \ do { \ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \ } while (0) #define DR7_RW0(dr) DR7_RW_VAL(dr, 0) #define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw) #define DR7_RW1(dr) DR7_RW_VAL(dr, 1) #define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw) #define DR7_RW2(dr) DR7_RW_VAL(dr, 2) #define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw) #define DR7_RW3(dr) DR7_RW_VAL(dr, 3) #define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw) #define DR7_LEN_VAL(dr, drnum) \ (((dr) >> (18 + (4 * (drnum)))) & 0x3) #define DR7_LEN_SET(dr, drnum, rw) \ do { \ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \ } while (0) #define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0) #define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len) #define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1) #define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len) #define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2) #define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len) #define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3) #define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len) #define DR7_G0(dr) (((dr)>>1)&0x1) #define DR7_G0SET(dr) ((dr) |= 0x2) #define DR7_G0CLR(dr) ((dr) &= ~0x2) #define DR7_G1(dr) (((dr)>>3)&0x1) #define DR7_G1SET(dr) ((dr) |= 0x8) #define DR7_G1CLR(dr) ((dr) &= ~0x8) #define DR7_G2(dr) (((dr)>>5)&0x1) #define DR7_G2SET(dr) ((dr) |= 0x20) #define DR7_G2CLR(dr) ((dr) &= ~0x20) #define DR7_G3(dr) (((dr)>>7)&0x1) #define DR7_G3SET(dr) ((dr) |= 0x80) #define DR7_G3CLR(dr) ((dr) &= ~0x80) #define DR7_L0(dr) (((dr))&0x1) #define DR7_L0SET(dr) ((dr) |= 0x1) #define DR7_L0CLR(dr) ((dr) &= ~0x1) #define DR7_L1(dr) (((dr)>>2)&0x1) #define DR7_L1SET(dr) ((dr) |= 0x4) #define DR7_L1CLR(dr) ((dr) &= ~0x4) #define DR7_L2(dr) (((dr)>>4)&0x1) #define DR7_L2SET(dr) ((dr) |= 0x10) #define DR7_L2CLR(dr) ((dr) &= ~0x10) #define DR7_L3(dr) (((dr)>>6)&0x1) #define DR7_L3SET(dr) ((dr) |= 0x40) #define DR7_L3CLR(dr) ((dr) &= ~0x40) #define DR7_GD 0x00002000 /* General Detect Enable */ #define DR7_GE 0x00000200 /* Global exact */ #define DR7_LE 0x00000100 /* Local exact */ #define DR_TYPE_EXECUTE 0x0 #define DR_TYPE_WRITE 0x1 #define DR_TYPE_IO 0x2 #define DR_TYPE_RW 0x3 extern kdb_machreg_t kdba_getdr6(void); extern void kdba_putdr6(kdb_machreg_t); extern kdb_machreg_t kdba_getdr7(void); /* * Support for setjmp/longjmp */ #define JB_BX 0 #define JB_SI 1 #define JB_DI 2 #define JB_BP 3 #define JB_SP 4 #define JB_PC 5 typedef struct 
__kdb_jmp_buf { unsigned long regs[6]; /* kdba_setjmp assumes fixed offsets here */ } kdb_jmp_buf; extern int asmlinkage kdba_setjmp(kdb_jmp_buf *); extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int); #define kdba_setjmp kdba_setjmp extern kdb_jmp_buf *kdbjmpbuf; /* Arch specific data saved for running processes */ struct kdba_running_process { long esp; /* CONFIG_4KSTACKS may be on a different stack */ long eip; /* eip when esp was set */ }; static inline void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs) { k->esp = current_stack_pointer; __asm__ __volatile__ ( " lea 1f,%%eax; movl %%eax,%0 ; 1: " : "=r"(k->eip) : : "eax" ); } static inline void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs) { } struct kdb_activation_record; extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu, struct kdb_activation_record *ar); extern void kdba_wait_for_cpus(void); extern fastcall void kdb_interrupt(void); #define KDB_INT_REGISTERS 8 #endif /* !_ASM_KDBPRIVATE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kdebug.h000066400000000000000000000021231314037446600242700ustar00rootroot00000000000000#ifndef _I386_KDEBUG_H #define _I386_KDEBUG_H 1 /* * Aug-05 2004 Ported by Prasanna S Panchamukhi * from x86_64 architecture. */ #include struct pt_regs; struct die_args { struct pt_regs *regs; const char *str; long err; int trapnr; int signr; }; /* Note - you should never unregister because that can race with NMIs. If you really want to do it first unregister - then synchronize_sched - then free. */ int register_die_notifier(struct notifier_block *nb); extern struct notifier_block *i386die_chain; /* Grossly misnamed. */ enum die_val { DIE_OOPS = 1, DIE_INT3, DIE_DEBUG, DIE_PANIC, DIE_NMI, DIE_DIE, DIE_NMIWATCHDOG, DIE_KERNELDEBUG, DIE_KDEBUG_ENTER, DIE_KDEBUG_LEAVE, DIE_TRAP, DIE_GPF, DIE_CALL, DIE_NMI_IPI, DIE_PAGE_FAULT, }; static inline int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig }; return notifier_call_chain(&i386die_chain, val, &args); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kerntypes.h000066400000000000000000000011541314037446600250560ustar00rootroot00000000000000/* * asm-i386/kerntypes.h * * Arch-dependent header file that includes headers for all arch-specific * types of interest. * The kernel type information is used by the lcrash utility when * analyzing system crash dumps or the live system. Using the type * information for the running system, rather than kernel header files, * makes for a more flexible and robust analysis tool. * * This source code is released under the GNU GPL. 
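/*
 * Illustrative sketch (not part of this tree): hooking a handler into the
 * die notifier chain declared in kdebug.h above. The handler name and the
 * printk body are assumptions for demonstration only.
 */
static int example_die_handler(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk("die: %s, err=%ld, trap=%d\n",
		       args->str, args->err, args->trapnr);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_handler,
};

/* In some init path: register_die_notifier(&example_die_nb); */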
*/ /* ix86-specific header files */ #ifndef _I386_KERNTYPES_H #define _I386_KERNTYPES_H /* Use the default */ #include #endif /* _I386_KERNTYPES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kexec.h000066400000000000000000000100241314037446600241250ustar00rootroot00000000000000#ifndef _I386_KEXEC_H #define _I386_KEXEC_H #define PA_CONTROL_PAGE 0 #define VA_CONTROL_PAGE 1 #define PA_PGD 2 #define VA_PGD 3 #define PA_PTE_0 4 #define VA_PTE_0 5 #define PA_PTE_1 6 #define VA_PTE_1 7 #ifdef CONFIG_X86_PAE #define PA_PMD_0 8 #define VA_PMD_0 9 #define PA_PMD_1 10 #define VA_PMD_1 11 #define PAGES_NR 12 #else #define PAGES_NR 8 #endif #ifndef __ASSEMBLY__ #include #include #include /* * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. * I.e. Maximum page that is mapped directly into kernel memory, * and kmap is not required. * * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct * calculation for the amount of memory directly mappable into the * kernel memory space. */ /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE #define KEXEC_CONTROL_CODE_SIZE 4096 /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_386 /* We can also handle crash dumps from 64 bit kernel. */ #define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) #define MAX_NOTE_BYTES 1024 /* CPU does not save ss and esp on stack if execution is already * running in kernel mode at the time of NMI occurrence. This code * fixes it. */ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, struct pt_regs *oldregs) { memcpy(newregs, oldregs, sizeof(*newregs)); newregs->esp = (unsigned long)&(oldregs->esp); __asm__ __volatile__( "xorl %%eax, %%eax\n\t" "movw %%ss, %%ax\n\t" :"=a"(newregs->xss)); } /* * This function is responsible for capturing register states if coming * via panic otherwise just fix up the ss and esp if coming via kernel * mode exception. */ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { if (oldregs) crash_fixup_ss_esp(newregs, oldregs); else { __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx)); __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx)); __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx)); __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi)); __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi)); __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp)); __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax)); __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp)); __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss)); __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs)); __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds)); __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes)); __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags)); newregs->eip = (unsigned long)current_text_addr(); } } asmlinkage NORET_TYPE void relocate_kernel(unsigned long indirection_page, unsigned long control_page, unsigned long start_address, unsigned int has_pae) ATTRIB_NORET; /* Under Xen we need to work with machine addresses. 
These macros give the * machine address of a certain page to the generic kexec code instead of * the pseudo physical address which would be given by the default macros. */ #ifdef CONFIG_XEN #define KEXEC_ARCH_HAS_PAGE_MACROS #define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page)) #define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn)) #define kexec_virt_to_phys(addr) virt_to_machine(addr) #define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr)) #endif #endif /* __ASSEMBLY__ */ #endif /* _I386_KEXEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kmap_types.h000066400000000000000000000007501314037446600252070ustar00rootroot00000000000000#ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H #include #ifdef CONFIG_DEBUG_HIGHMEM # define D(n) __KM_FENCE_##n , #else # define D(n) #endif enum km_type { D(0) KM_BOUNCE_READ, D(1) KM_SKB_SUNRPC_DATA, D(2) KM_SKB_DATA_SOFTIRQ, D(3) KM_USER0, D(4) KM_USER1, D(5) KM_BIO_SRC_IRQ, D(6) KM_BIO_DST_IRQ, D(7) KM_PTE0, D(8) KM_PTE1, D(9) KM_IRQ0, D(10) KM_IRQ1, D(11) KM_SOFTIRQ0, D(12) KM_SOFTIRQ1, D(13) KM_KDB, D(14) KM_DUMP, D(15) KM_TYPE_NR }; #undef D #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/kprobes.h000066400000000000000000000051021314037446600244740ustar00rootroot00000000000000#ifndef _ASM_KPROBES_H #define _ASM_KPROBES_H /* * Kernel Probes (KProbes) * include/asm-i386/kprobes.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S Kernel * Probes initial implementation ( includes suggestions from * Rusty Russell). */ #include #include #define __ARCH_WANT_KPROBES_INSN_SLOT struct kprobe; struct pt_regs; typedef u8 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xcc #define MAX_INSN_SIZE 16 #define MAX_STACK_SIZE 64 #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES void arch_remove_kprobe(struct kprobe *p); void kretprobe_trampoline(void); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { /* copy of the original instruction */ kprobe_opcode_t *insn; }; struct prev_kprobe { struct kprobe *kp; unsigned long status; unsigned long old_eflags; unsigned long saved_eflags; }; /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_old_eflags; unsigned long kprobe_saved_eflags; long *jprobe_saved_esp; struct pt_regs jprobe_saved_regs; kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; /* trap3/1 are intr gates for kprobes. So, restore the status of IF, * if necessary, before executing the original int3/1 (trap) handler. 
*/ static inline void restore_interrupts(struct pt_regs *regs) { if (regs->eflags & IF_MASK) local_irq_enable(); } extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); #endif /* _ASM_KPROBES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ldt.h000066400000000000000000000013001314037446600236060ustar00rootroot00000000000000/* * ldt.h * * Definitions of structures used with the modify_ldt system call. */ #ifndef _LINUX_LDT_H #define _LINUX_LDT_H /* Maximum number of LDT entries supported. */ #define LDT_ENTRIES 8192 /* The size of each LDT entry. */ #define LDT_ENTRY_SIZE 8 #ifndef __ASSEMBLY__ struct user_desc { unsigned int entry_number; unsigned long base_addr; unsigned int limit; unsigned int seg_32bit:1; unsigned int contents:2; unsigned int read_exec_only:1; unsigned int limit_in_pages:1; unsigned int seg_not_present:1; unsigned int useable:1; }; #define MODIFY_LDT_CONTENTS_DATA 0 #define MODIFY_LDT_CONTENTS_STACK 1 #define MODIFY_LDT_CONTENTS_CODE 2 #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/linkage.h000066400000000000000000000006071314037446600244460ustar00rootroot00000000000000#ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) #define FASTCALL(x) x __attribute__((regparm(3))) #define fastcall __attribute__((regparm(3))) #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) #ifdef CONFIG_X86_ALIGNMENT_16 #define __ALIGN .align 16,0x90 #define __ALIGN_STR ".align 16,0x90" #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/local.h000066400000000000000000000034211314037446600241230ustar00rootroot00000000000000#ifndef _ARCH_I386_LOCAL_H #define _ARCH_I386_LOCAL_H #include typedef struct { volatile unsigned long counter; } local_t; #define LOCAL_INIT(i) { (i) } #define local_read(v) ((v)->counter) #define local_set(v,i) (((v)->counter) = (i)) static __inline__ void local_inc(local_t *v) { __asm__ __volatile__( "incl %0" :"=m" (v->counter) :"m" (v->counter)); } static __inline__ void local_dec(local_t *v) { __asm__ __volatile__( "decl %0" :"=m" (v->counter) :"m" (v->counter)); } static __inline__ void local_add(unsigned long i, local_t *v) { __asm__ __volatile__( "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } static __inline__ void local_sub(unsigned long i, local_t *v) { __asm__ __volatile__( "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /* On x86, these are no better than the atomic variants. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i,l) local_add((i),(l)) #define __local_sub(i,l) local_sub((i),(l)) /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. 
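/*
 * Illustrative sketch (not part of this tree): a simple event counter built
 * on the local_t operations defined above. The names are hypothetical.
 */
static local_t example_events = LOCAL_INIT(0);

static inline void example_note_event(void)
{
	local_inc(&example_events);
}

static inline unsigned long example_read_events(void)
{
	return local_read(&example_events);
}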
*/ #define cpu_local_read(v) local_read(&__get_cpu_var(v)) #define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) #define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) #define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) #define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) #define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) #define __cpu_local_add(i, v) cpu_local_add((i), (v)) #define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) #endif /* _ARCH_I386_LOCAL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-bigsmp/000077500000000000000000000000001314037446600250475ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-bigsmp/mach_apic.h000066400000000000000000000066201314037446600271300ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H extern u8 bios_cpu_apicid[]; #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) #define esr_disable (1) static inline int apic_id_registered(void) { return (1); } /* Round robin the irqs amoung the online cpus */ static inline cpumask_t target_cpus(void) { static unsigned long cpu = NR_CPUS; do { if (cpu >= NR_CPUS) cpu = first_cpu(cpu_online_map); else cpu = next_cpu(cpu, cpu_online_map); } while (cpu >= NR_CPUS); return cpumask_of_cpu(cpu); } #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0 #define TARGET_CPUS (target_cpus()) #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define INT_DELIVERY_MODE (dest_Fixed) #define INT_DEST_MODE (0) /* phys delivery to target proc */ #define NO_BALANCE_IRQ (0) #define WAKE_SECONDARY_VIA_INIT static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return (0); } static inline unsigned long check_apicid_present(int bit) { return (1); } static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; id = xapic_phys_to_log_apicid(cpu); val |= SET_APIC_LOGICAL_ID(id); return val; } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static inline void init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); apic_write_around(APIC_DFR, APIC_DFR_VALUE); val = calculate_ldr(cpu); apic_write_around(APIC_LDR, val); } static inline void clustered_apic_check(void) { printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", "Physflat", nr_ioapics); } static inline int multi_timer_check(int apic, int irq) { return (0); } static inline int apicid_to_node(int logical_apicid) { return (0); } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) return (int) bios_cpu_apicid[mps_cpu]; return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); } extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { if (cpu >= NR_CPUS) return BAD_APICID; return cpu_physical_id(cpu); } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { printk("Processor #%d %ld:%ld APIC version %d\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); return m->mpc_apicid; } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0xFFL); } static inline void setup_portio_remap(void) { } static inline void enable_apic_mode(void) { } static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return (1); } /* As we are using single CPU as destination, pick only one CPU here */ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { int cpu; int apicid; cpu = first_cpu(cpumask); apicid = cpu_to_logical_apicid(cpu); return apicid; } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-bigsmp/mach_apicdef.h000066400000000000000000000003431314037446600276030ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #define APIC_ID_MASK (0xFF<<24) static inline unsigned get_apic_id(unsigned long x) { return (((x)>>24)&0xFF); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-bigsmp/mach_ipi.h000066400000000000000000000010051314037446600267650ustar00rootroot00000000000000#ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H void send_IPI_mask_sequence(cpumask_t mask, int vector); static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } static inline void send_IPI_all(int vector) { send_IPI_mask(cpu_online_map, vector); } #endif /* __ASM_MACH_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-bigsmp/mach_mpspec.h000066400000000000000000000002211314037446600274720ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 256 #define MAX_MP_BUSSES 32 #endif /* __ASM_MACH_MPSPEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/000077500000000000000000000000001314037446600252125ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/apm.h000066400000000000000000000032621314037446600261430ustar00rootroot00000000000000/* * include/asm-i386/mach-default/apm.h * * Machine specific APM BIOS functions for generic. 
* Split out from apm.c by Osamu Tomita */ #ifndef _ASM_APM_H #define _ASM_APM_H #ifdef APM_ZERO_SEGS # define APM_DO_ZERO_SEGS \ "pushl %%ds\n\t" \ "pushl %%es\n\t" \ "xorl %%edx, %%edx\n\t" \ "mov %%dx, %%ds\n\t" \ "mov %%dx, %%es\n\t" \ "mov %%dx, %%fs\n\t" \ "mov %%dx, %%gs\n\t" # define APM_DO_POP_SEGS \ "popl %%es\n\t" \ "popl %%ds\n\t" #else # define APM_DO_ZERO_SEGS # define APM_DO_POP_SEGS #endif static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi) { /* * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" "lcall *%%cs:apm_bios_entry\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" APM_DO_POP_SEGS : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), "=S" (*esi) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); } static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) { int cx, dx, si; u8 error; /* * N.B. We do NOT need a cld after the BIOS call * because we always save and restore the flags. */ __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" "lcall *%%cs:apm_bios_entry\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" APM_DO_POP_SEGS : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), "=S" (si) : "a" (func), "b" (ebx_in), "c" (ecx_in) : "memory", "cc"); return error; } #endif /* _ASM_APM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/bios_ebda.h000066400000000000000000000005271314037446600272760ustar00rootroot00000000000000#ifndef _MACH_BIOS_EBDA_H #define _MACH_BIOS_EBDA_H /* * there is a real-mode segmented pointer pointing to the * 4K EBDA area at 0x40E. */ static inline unsigned int get_bios_ebda(void) { unsigned int address = *(unsigned short *)phys_to_virt(0x40E); address <<= 4; return address; /* 0 means none */ } #endif /* _MACH_BIOS_EBDA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/do_timer.h000066400000000000000000000043701314037446600271710ustar00rootroot00000000000000/* defines for inline arch setup functions */ #include #include /** * do_timer_interrupt_hook - hook into timer tick * @regs: standard registers from interrupt * * Description: * This hook is called immediately after the timer interrupt is ack'd. * It's primary purpose is to allow architectures that don't possess * individual per CPU clocks (like the CPU APICs supply) to broadcast the * timer interrupt as a means of triggering reschedules etc. **/ static inline void do_timer_interrupt_hook(struct pt_regs *regs) { do_timer(regs); #ifndef CONFIG_SMP update_process_times(user_mode(regs)); #endif /* * In the SMP case we use the local APIC timer interrupt to do the * profiling, except when we simulate SMP mode on a uniprocessor * system, in that case we have to call the local interrupt handler. */ #ifndef CONFIG_X86_LOCAL_APIC profile_tick(CPU_PROFILING, regs); #else if (!using_apic_timer) smp_local_timer_interrupt(regs); #endif } /* you can safely undefine this if you don't have the Neptune chipset */ #define BUGGY_NEPTUN_TIMER /** * do_timer_overflow - process a detected timer overflow condition * @count: hardware timer interrupt count on overflow * * Description: * This call is invoked when the jiffies count has not incremented but * the hardware timer interrupt has. 
It means that a timer tick interrupt * came along while the previous one was pending, thus a tick was missed **/ static inline int do_timer_overflow(int count) { int i; spin_lock(&i8259A_lock); /* * This is tricky when I/O APICs are used; * see do_timer_interrupt(). */ i = inb(0x20); spin_unlock(&i8259A_lock); /* assumption about timer being IRQ0 */ if (i & 0x01) { /* * We cannot detect lost timer interrupts ... * well, that's why we call them lost, don't we? :) * [hmm, on the Pentium and Alpha we can ... sort of] */ count -= LATCH; } else { #ifdef BUGGY_NEPTUN_TIMER /* * for the Neptun bug we know that the 'latch' * command doesn't latch the high and low value * of the counter atomically. Thus we have to * substract 256 from the counter * ... funny, isnt it? :) */ count -= 256; #else printk("do_slow_gettimeoffset(): hardware timer problem?\n"); #endif } return count; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/entry_arch.h000066400000000000000000000022351314037446600275230ustar00rootroot00000000000000/* * This file is designed to contain the BUILD_INTERRUPT specifications for * all of the extra named interrupt vectors used by the architecture. * Usually this is the Inter Process Interrupts (IPIs) */ /* * The following vectors are part of the Linux architecture, there * is no hardware IRQ pin equivalent for them, they are triggered * through the ICC by us (IPIs) */ #ifdef CONFIG_X86_SMP BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) #endif /* * every pentium local APIC has two 'local interrupts', with a * soft-definable vector attached to both interrupts, one of * which is a timer interrupt, the other one is error counter * overflow. Linux uses the local APIC timer interrupt to get * a much simpler SMP time architecture: */ #ifdef CONFIG_X86_LOCAL_APIC BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) #ifdef CONFIG_X86_MCE_P4THERMAL BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/io_ports.h000066400000000000000000000013521314037446600272220ustar00rootroot00000000000000/* * arch/i386/mach-generic/io_ports.h * * Machine specific IO port address definition for generic. * Written by Osamu Tomita */ #ifndef _MACH_IO_PORTS_H #define _MACH_IO_PORTS_H /* i8253A PIT registers */ #define PIT_MODE 0x43 #define PIT_CH0 0x40 #define PIT_CH2 0x42 /* i8259A PIC registers */ #define PIC_MASTER_CMD 0x20 #define PIC_MASTER_IMR 0x21 #define PIC_MASTER_ISR PIC_MASTER_CMD #define PIC_MASTER_POLL PIC_MASTER_ISR #define PIC_MASTER_OCW3 PIC_MASTER_ISR #define PIC_SLAVE_CMD 0xa0 #define PIC_SLAVE_IMR 0xa1 /* i8259A PIC related value */ #define PIC_CASCADE_IR 2 #define MASTER_ICW4_DEFAULT 0x01 #define SLAVE_ICW4_DEFAULT 0x01 #define PIC_ICW4_AEOI 2 #endif /* !_MACH_IO_PORTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/irq_vectors.h000066400000000000000000000051171314037446600277270ustar00rootroot00000000000000/* * This file should contain #defines for all of the interrupt vector * numbers used by this architecture. 
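/*
 * Illustrative sketch (not part of this tree): programming PIT channel 0 as
 * a rate generator with the port constants from io_ports.h above. LATCH is
 * the usual divisor from linux/timex.h; the snippet mirrors the common
 * i8253 setup and is for demonstration only.
 */
static inline void example_setup_pit_ch0(void)
{
	outb_p(0x34, PIT_MODE);		/* channel 0, LSB/MSB, mode 2, binary */
	outb_p(LATCH & 0xff, PIT_CH0);	/* divisor low byte */
	outb_p(LATCH >> 8, PIT_CH0);	/* divisor high byte */
}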
* * In addition, there are some standard defines: * * FIRST_EXTERNAL_VECTOR: * The first free place for external interrupts * * SYSCALL_VECTOR: * The IRQ vector a syscall makes the user to kernel transition * under. * * TIMER_IRQ: * The IRQ number the timer interrupt comes in at. * * NR_IRQS: * The total number of interrupt vectors (including all the * architecture specific interrupts) needed. * */ #ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 #define KDBENTER_VECTOR 0x81 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. * * Vectors 0xf0-0xfa are free (reserved for future Linux use). */ #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb #define DUMP_VECTOR 0xfa #define KDB_VECTOR 0xf9 #define THERMAL_APIC_VECTOR 0xf0 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef #define TIMER_IRQ 0 /* * 16 8259A IRQ's, 208 potential APIC interrupt sources. * Right now the APIC is mostly only used for SMP. * 256 vectors is an architectural limit. (we can have * more than 256 devices theoretically, but they will * have to use shared interrupts) * Since vectors 0x00-0x1f are used/reserved for the CPU, * the usable vector space is 0x20-0xff (224 vectors) */ /* * The maximum number of vectors supported by i386 processors * is limited to 256. For processors other than i386, NR_VECTORS * should be changed accordingly. 
*/ #define NR_VECTORS 256 #include "irq_vectors_limits.h" #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/irq_vectors_limits.h000066400000000000000000000006611314037446600313070ustar00rootroot00000000000000#ifndef _ASM_IRQ_VECTORS_LIMITS_H #define _ASM_IRQ_VECTORS_LIMITS_H #ifdef CONFIG_PCI_MSI #define NR_IRQS FIRST_SYSTEM_VECTOR #define NR_IRQ_VECTORS NR_IRQS #else #ifdef CONFIG_X86_IO_APIC #define NR_IRQS 224 # if (224 >= 32 * NR_CPUS) # define NR_IRQ_VECTORS NR_IRQS # else # define NR_IRQ_VECTORS (32 * NR_CPUS) # endif #else #define NR_IRQS 16 #define NR_IRQ_VECTORS NR_IRQS #endif #endif #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_apic.h000066400000000000000000000055001314037446600272670ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #include #define APIC_DFR_VALUE (APIC_DFR_FLAT) static inline cpumask_t target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_map; #else return cpumask_of_cpu(0); #endif } #define TARGET_CPUS (target_cpus()) #define NO_BALANCE_IRQ (0) #define esr_disable (0) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return physid_isset(apicid, bitmap); } static inline unsigned long check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static inline void init_apic_ldr(void) { unsigned long val; apic_write_around(APIC_DFR, APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); apic_write_around(APIC_LDR, val); } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { return phys_map; } static inline void clustered_apic_check(void) { #ifdef CONFIG_SMP printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", "Flat", nr_ioapics); #endif } static inline int multi_timer_check(int apic, int irq) { return 0; } static inline int apicid_to_node(int logical_apicid) { return 0; } /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { return 1 << cpu; } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < get_physical_broadcast()) return mps_cpu; else return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { return physid_mask_of_physid(phys_apicid); } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { printk("Processor #%d %ld:%ld APIC version %d\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); return (m->mpc_apicid); } static inline void setup_portio_remap(void) { } static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); } static inline int apic_id_registered(void) { return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return cpus_addr(cpumask)[0]; } static inline void enable_apic_mode(void) { } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_apicdef.h000066400000000000000000000006051314037446600277470ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #include #define APIC_ID_MASK (0xF<<24) static inline unsigned get_apic_id(unsigned long x) { #ifndef CONFIG_XEN unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); if (APIC_XAPIC(ver)) return (((x)>>24)&0xFF); else #endif return (((x)>>24)&0xF); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_apicops.h000066400000000000000000000012251314037446600300110ustar00rootroot00000000000000/* * Basic functions accessing APICs. * These may optionally be overridden by the subarchitecture in * mach_apicops.h. */ #ifndef __ASM_MACH_APICOPS_H #define __ASM_MACH_APICOPS_H #ifdef CONFIG_X86_LOCAL_APIC static __inline void apic_write(unsigned long reg, unsigned long v) { *((volatile unsigned long *)(APIC_BASE+reg)) = v; } static __inline void apic_write_atomic(unsigned long reg, unsigned long v) { xchg((volatile unsigned long *)(APIC_BASE+reg), v); } static __inline unsigned long apic_read(unsigned long reg) { return *((volatile unsigned long *)(APIC_BASE+reg)); } #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_MACH_APICOPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_apictimer.h000066400000000000000000000026241314037446600303340ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. 
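/*
 * Illustrative sketch (not part of this tree): reading the local APIC ID
 * with apic_read() from mach_apicops.h and GET_APIC_ID() from
 * mach_apicdef.h above. APIC_ID is the register offset from asm/apicdef.h.
 */
static inline void example_print_apic_id(void)
{
	unsigned int id = GET_APIC_ID(apic_read(APIC_ID));

	printk("local APIC id: %u\n", id);
}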
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to dhecht@vmware.com * */ /* * Initiation routines related to the APIC timer. * These may optionally be overridden by the subarchitecture in * mach_apictimer.h. */ #ifndef __ASM_MACH_APICTIMER_H #define __ASM_MACH_APICTIMER_H #ifdef CONFIG_X86_LOCAL_APIC extern int __init timer_irq_works(void); static inline void mach_setup_boot_local_clock(void) { setup_boot_APIC_clock(); } static inline void mach_setup_secondary_local_clock(void) { setup_secondary_APIC_clock(); } static inline int mach_timer_irq_works(void) { return timer_irq_works(); } #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_MACH_APICTIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_asm.h000066400000000000000000000004561314037446600271400ustar00rootroot00000000000000#ifndef __MACH_ASM_H #define __MACH_ASM_H #define IRET iret #define CLI cli #define STI sti #define STI_SYSEXIT sti; sysexit #define GET_CR0 mov %cr0, %eax #define WRMSR wrmsr #define RDMSR rdmsr #define CPUID cpuid #define CLI_STRING "cli" #define STI_STRING "sti" #endif /* __MACH_ASM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_desc.h000066400000000000000000000026341314037446600272760ustar00rootroot00000000000000#ifndef __MACH_DESC_H #define __MACH_DESC_H #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) #define clear_LDT_desc() __asm__ __volatile__("lldt %w0"::"r" (0)) #define mach_setup_dt(ptr,size) #define mach_release_dt(ptr,size) #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr)) #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr)) #define store_tr(tr) __asm__ ("str %0":"=m" (tr)) #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) #if TLS_SIZE != 24 # error update this code. #endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { #define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] C(0); C(1); C(2); #undef C } static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) { __u32 *lp = (__u32 *)((char *)dt + entry*8); *lp = entry_a; *(lp+1) = entry_b; } #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_hooks.h000066400000000000000000000001731314037446600274770ustar00rootroot00000000000000#ifndef _MACH_HOOKS_H #define _MACH_HOOKS_H /* Fall back to the default hooks in include/asm-i386/arch_hooks.h */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_idletimer.h000066400000000000000000000003621314037446600303320ustar00rootroot00000000000000 /* * NO_IDLE_HZ callbacks. 
*/ #ifndef __ASM_MACH_IDLETIMER_H #define __ASM_MACH_IDLETIMER_H static inline void stop_hz_timer(void) { } static inline void restart_hz_timer(struct pt_regs *regs) { } #endif /* __ASM_MACH_IDLETIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_io.h000066400000000000000000000013501314037446600267610ustar00rootroot00000000000000#ifndef _MACH_ASM_IO_H #define _MACH_ASM_IO_H #ifdef SLOW_IO_BY_JUMPING #define __SLOW_DOWN_IO asm volatile("jmp 1f; 1: jmp 1f; 1:") #else #define __SLOW_DOWN_IO asm volatile("outb %al, $0x80") #endif #define __BUILDOUTINST(bwl,bw,value,port) \ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)) #define __BUILDININST(bwl,bw,value,port) \ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)) #define __BUILDOUTSINST(bwl,addr,count,port) \ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)) #define __BUILDINSINST(bwl,addr,count,port) \ __asm__ __volatile__("rep; ins" #bwl \ : "+D"(addr), "+c"(count) \ : "d"(port) \ : "memory") #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_ipi.h000066400000000000000000000021461314037446600271370ustar00rootroot00000000000000#ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H void send_IPI_mask_bitmask(cpumask_t mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_bitmask(mask, vector); } static inline void __local_send_IPI_allbutself(int vector) { if (no_broadcast) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); send_IPI_mask(mask, vector); } else __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); } static inline void __local_send_IPI_all(int vector) { if (no_broadcast) send_IPI_mask(cpu_online_map, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } static inline void send_IPI_allbutself(int vector) { /* * if there are no other CPUs in the system then we get an APIC send * error if we try to broadcast, thus avoid sending IPIs in this case. 
*/ if (!(num_online_cpus() > 1)) return; __local_send_IPI_allbutself(vector); return; } static inline void send_IPI_all(int vector) { __local_send_IPI_all(vector); } #endif /* __ASM_MACH_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_module.h000066400000000000000000000002071314037446600276370ustar00rootroot00000000000000#ifndef _ASM_I386_ARCH_MODULE_H #define _ASM_I386_ARCH_MODULE_H #define MODULE_SUBARCH_VERMAGIC #endif /* _ASM_I386_ARCH_MODULE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_mpparse.h000066400000000000000000000011561314037446600300250ustar00rootroot00000000000000#ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { } static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { return 0; } /* Hook from generic ACPI tables.c */ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; } #endif /* __ASM_MACH_MPPARSE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_mpspec.h000066400000000000000000000002221314037446600276360ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 256 #define MAX_MP_BUSSES 260 #endif /* __ASM_MACH_MPSPEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_pgalloc.h000066400000000000000000000022741314037446600300010ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef _MACH_PGALLOC_H #define _MACH_PGALLOC_H #define mach_setup_pte(va, pfn) #define mach_release_pte(va, pfn) #define mach_setup_pmd(va, pfn) #define mach_release_pmd(va, pfn) #define mach_setup_pgd(va, pfn, root, base, pdirs) #define mach_release_pgd(va, pfn) #define mach_map_linear_pt(num, ptep, pfn) #define mach_map_linear_range(start, pages, pfn) #endif /* _MACH_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_pgtable.h000066400000000000000000000013301314037446600277660ustar00rootroot00000000000000#ifndef _MACH_PGTABLE_H #define _MACH_PGTABLE_H /* Update hook for shadow mode hypervisors */ #define pte_update_hook(mm, addr, pteptr) do { } while (0) /* * Also, we only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. 
*/ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ if (__dirty) { \ (__ptep)->pte_low = (__entry).pte_low; \ flush_tlb_page(__vma, __address); \ } \ } while (0) #endif /* _PGTABLE_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_processor.h000066400000000000000000000043031314037446600303720ustar00rootroot00000000000000#ifndef _MACH_PROCESSOR_H #define _MACH_PROCESSOR_H /* * Generic CPUID function * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. */ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c"(0)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__("cpuid" : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__("cpuid" : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__("cpuid" : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__("cpuid" : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define load_cr3(pgdir) \ asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir))) #define flush_deferred_cpu_state() #define arch_update_kernel_stack(t,s) /* * These special macros can be used to get or set a debugging register */ #define get_debugreg(var, register) \ __asm__("movl %%db" #register ", %0" \ :"=r" (var)) #define set_debugreg(value, register) \ __asm__("movl %0,%%db" #register \ : /* no output */ \ :"r" (value)) /* * Set IOPL bits in EFLAGS from given mask */ static inline void set_iopl_mask(unsigned mask) { unsigned int reg; __asm__ __volatile__ ("pushfl;" "popl %0;" "andl %1, %0;" "orl %2, %0;" "pushl %0;" "popfl" : "=&r" (reg) : "i" (~X86_EFLAGS_IOPL), "r" (mask)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_reboot.h000066400000000000000000000013061314037446600276450ustar00rootroot00000000000000/* * arch/i386/mach-generic/mach_reboot.h * * Machine specific reboot functions for generic. * Split out from reboot.c by Osamu Tomita */ #ifndef _MACH_REBOOT_H #define _MACH_REBOOT_H static inline void kb_wait(void) { int i; for (i = 0; i < 0x10000; i++) if ((inb_p(0x64) & 0x02) == 0) break; } static inline void mach_reboot(void) { int i; for (i = 0; i < 10; i++) { kb_wait(); udelay(50); outb(0x60, 0x64); /* write Controller Command Byte */ udelay(50); kb_wait(); udelay(50); outb(0x14, 0x60); /* set "System flag" */ udelay(50); kb_wait(); udelay(50); outb(0xfe, 0x64); /* pulse reset low */ udelay(50); } } #endif /* !_MACH_REBOOT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_schedclock.h000066400000000000000000000004541314037446600304600ustar00rootroot00000000000000 /* * Machine specific sched_clock routines. 
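/*
 * Illustrative sketch (not part of this tree): using the cpuid() helper from
 * mach_processor.h above to read the 12-byte vendor string from leaf 0.
 * The buffer layout follows the usual EBX/EDX/ECX ordering.
 */
static inline void example_read_vendor(char vendor[13])
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
}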
*/ #ifndef __ASM_MACH_SCHEDCLOCK_H #define __ASM_MACH_SCHEDCLOCK_H #include static inline unsigned long long sched_clock_cycles(void) { unsigned long long cycles; rdtscll(cycles); return cycles; } #endif /* __ASM_MACH_SCHEDCLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_segment.h000066400000000000000000000021271314037446600300170ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef __MACH_SEGMENT_H #define __MACH_SEGMENT_H #define get_kernel_rpl() 0 #define COMPARE_SEGMENT_STACK(segment, offset) \ cmpw $segment, offset(%esp); #define COMPARE_SEGMENT_REG(segment, reg) \ pushl %eax; \ mov reg, %eax; \ cmpw $segment,%ax; \ popl %eax; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_system.h000066400000000000000000000043271314037446600277050ustar00rootroot00000000000000#ifndef _MACH_SYSTEM_H #define _MACH_SYSTEM_H #define clts() __asm__ __volatile__ ("clts") #define read_cr0() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr0,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr0(x) \ __asm__ __volatile__("movl %0,%%cr0": :"r" (x)); #define read_cr2() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr2,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr2(x) \ __asm__ __volatile__("movl %0,%%cr2": :"r" (x)); #define read_cr3() ({ \ unsigned int __dummy; \ __asm__( \ "movl %%cr3,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr3(x) \ __asm__ __volatile__("movl %0,%%cr3": :"r" (x)); #define read_cr4() ({ \ unsigned int __dummy; \ __asm__( \ "movl %%cr4,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define read_cr4_safe() ({ \ unsigned int __dummy; \ /* This could fault if %cr4 does not exist */ \ __asm__("1: movl %%cr4, %0 \n" \ "2: \n" \ ".section __ex_table,\"a\" \n" \ ".long 1b,2b \n" \ ".previous \n" \ : "=r" (__dummy): "0" (0)); \ __dummy; \ }) #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)); #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory"); /* interrupt control.. 
*/ #define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0) /* For spinlocks etc */ #define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0) #define local_irq_disable() __asm__ __volatile__("cli": : :"memory") #define local_irq_enable() __asm__ __volatile__("sti": : :"memory") /* used in the idle loop; sti holds off interrupts for 1 instruction */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") /* force shutdown of the processor; used when IRQs are disabled */ #define shutdown_halt() __asm__ __volatile__("hlt": : :"memory") /* halt until interrupted */ #define halt() __asm__ __volatile__("hlt") #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_time.h000066400000000000000000000077701314037446600273240ustar00rootroot00000000000000/* * include/asm-i386/mach-default/mach_time.h * * Machine specific set RTC function for generic. * Split out from time.c by Osamu Tomita */ #ifndef _MACH_TIME_H #define _MACH_TIME_H #include /* for check timing call set_rtc_mmss() 500ms */ /* used in arch/i386/time.c::do_timer_interrupt() */ #define USEC_AFTER 500000 #define USEC_BEFORE 500000 /* * In order to set the CMOS clock precisely, set_rtc_mmss has to be * called 500 ms after the second nowtime has started, because when * nowtime is written into the registers of the CMOS clock, it will * jump to the next second precisely 500 ms later. Check the Motorola * MC146818A or Dallas DS12887 data sheet for details. * * BUG: This routine does not handle hour overflow properly; it just * sets the minutes. Usually you'll only notice that after reboot! */ static inline int mach_set_rtc_mmss(unsigned long nowtime) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; unsigned char save_control, save_freq_select; save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); cmos_minutes = CMOS_READ(RTC_MINUTES); if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) BCD_TO_BIN(cmos_minutes); /* * since we're only adjusting minutes and seconds, * don't interfere with hour overflow. This avoids * messing with unknown time zones but requires your * RTC not to be off by more than 15 minutes */ real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BIN_TO_BCD(real_seconds); BIN_TO_BCD(real_minutes); } CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { printk(KERN_WARNING "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; } /* The following flags have to be released exactly in this order, * otherwise the DS12887 (popular MC146818A clone with integrated * battery and quartz) will not reset the oscillator and will not * update precisely 500 ms later. You won't find this mentioned in * the Dallas Semiconductor data sheets, but who believes data * sheets anyway ... 
-- Markus Kuhn */ CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); return retval; } static inline unsigned long mach_get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; int i; /* The Linux interpretation of the CMOS clock register contents: * When the Update-In-Progress (UIP) flag goes from 1 to 0, the * RTC registers show the second which has precisely just started. * Let's hope other operating systems interpret the RTC the same way. */ /* read RTC exactly on falling edge of update flag */ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) break; for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) break; do { /* Isn't this overkill ? UIP above should guarantee consistency */ sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); day = CMOS_READ(RTC_DAY_OF_MONTH); mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(sec); BCD_TO_BIN(min); BCD_TO_BIN(hour); BCD_TO_BIN(day); BCD_TO_BIN(mon); BCD_TO_BIN(year); } if ((year += 1900) < 1970) year += 100; return mktime(year, mon, day, hour, min, sec); } #endif /* !_MACH_TIME_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_timer.h000066400000000000000000000026501314037446600274760ustar00rootroot00000000000000/* * include/asm-i386/mach-default/mach_timer.h * * Machine specific calibrate_tsc() for generic. * Split out from timer_tsc.c by Osamu Tomita */ /* ------ Calibrate the TSC ------- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). * Too much 64-bit arithmetic here to do this cleanly in C, and for * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) * output busy loop as low as possible. We avoid reading the CTC registers * directly because of the awkward 8-bit access mechanism of the 82C54 * device. */ #ifndef _MACH_TIMER_H #define _MACH_TIMER_H #define CALIBRATE_LATCH (5 * LATCH) static inline void mach_prepare_counter(void) { /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Now let's take care of CTC channel 2 * * Set the Gate high, program CTC channel 2 for mode 0, * (interrupt on terminal count mode), binary count, * load 5 * LATCH count, (LSB and MSB) to begin countdown. * * Some devices need a delay here. */ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ } static inline void mach_countup(unsigned long *count_p) { unsigned long count = 0; do { count++; } while ((inb_p(0x61) & 0x20) == 0); *count_p = count; } #endif /* !_MACH_TIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_tlbflush.h000066400000000000000000000017571314037446600302100ustar00rootroot00000000000000#ifndef _MACH_TLBFLUSH_H #define _MACH_TLBFLUSH_H #define __flush_tlb() \ do { \ unsigned int tmpreg; \ \ __asm__ __volatile__( \ "movl %%cr3, %0; \n" \ "movl %0, %%cr3; # flush TLB \n" \ : "=r" (tmpreg) \ :: "memory"); \ } while (0) /* * Global pages have to be flushed a bit differently. Not a real * performance problem because this does not happen often. 
*/ #define __flush_tlb_global() \ do { \ unsigned int tmpreg; \ \ __asm__ __volatile__( \ "movl %1, %%cr4; # turn off PGE \n" \ "movl %%cr3, %0; \n" \ "movl %0, %%cr3; # flush TLB \n" \ "movl %2, %%cr4; # turn PGE back on \n" \ : "=&r" (tmpreg) \ : "r" (mmu_cr4_features & ~X86_CR4_PGE), \ "r" (mmu_cr4_features) \ : "memory"); \ } while (0) #define __flush_tlb_single(addr) \ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) #endif /* _MACH_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_traps.h000066400000000000000000000017561314037446600275150ustar00rootroot00000000000000/* * include/asm-i386/mach-default/mach_traps.h * * Machine specific NMI handling for generic. * Split out from traps.c by Osamu Tomita */ #ifndef _MACH_TRAPS_H #define _MACH_TRAPS_H #include static inline void clear_mem_error(unsigned char reason) { reason = (reason & 0xf) | 4; outb(reason, 0x61); } static inline void clear_io_check_error(unsigned char reason) { unsigned long i; reason = (reason & 0xf) | 8; outb(reason, 0x61); i = 2000; while (--i) udelay(1000); reason &= ~8; outb(reason, 0x61); } static inline unsigned char get_nmi_reason(void) { return inb(0x61); } static inline void reassert_nmi(void) { int old_reg = -1; if (do_i_have_lock_cmos()) old_reg = current_lock_cmos_reg(); else lock_cmos(0); /* register doesn't matter here */ outb(0x8f, 0x70); inb(0x71); /* dummy */ outb(0x0f, 0x70); inb(0x71); /* dummy */ if (old_reg >= 0) outb(old_reg, 0x70); else unlock_cmos(); } #endif /* !_MACH_TRAPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/mach_wakecpu.h000066400000000000000000000016261314037446600300170ustar00rootroot00000000000000#ifndef __ASM_MACH_WAKECPU_H #define __ASM_MACH_WAKECPU_H /* * This file copes with machines that wakeup secondary CPUs by the * INIT, INIT, STARTUP sequence. 
*/ #define WAKE_SECONDARY_VIA_INIT #define TRAMPOLINE_LOW phys_to_virt(0x467) #define TRAMPOLINE_HIGH phys_to_virt(0x469) #define boot_cpu_apicid boot_cpu_physical_apicid static inline void wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)); return; } /* Nothing to do for most platforms, since cleared by the INIT cycle */ static inline void smp_callin_clear_local_apic(void) { } static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { } static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { } #if APIC_DEBUG #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) #else #define inquire_remote_apic(apicid) {} #endif #endif /* __ASM_MACH_WAKECPU_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/pci-functions.h000066400000000000000000000011471314037446600301470ustar00rootroot00000000000000/* * PCI BIOS function numbering for conventional PCI BIOS * systems */ #define PCIBIOS_PCI_FUNCTION_ID 0xb1XX #define PCIBIOS_PCI_BIOS_PRESENT 0xb101 #define PCIBIOS_FIND_PCI_DEVICE 0xb102 #define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 #define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 #define PCIBIOS_READ_CONFIG_BYTE 0xb108 #define PCIBIOS_READ_CONFIG_WORD 0xb109 #define PCIBIOS_READ_CONFIG_DWORD 0xb10a #define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b #define PCIBIOS_WRITE_CONFIG_WORD 0xb10c #define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d #define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e #define PCIBIOS_SET_PCI_HW_INT 0xb10f UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/setup_arch_post.h000066400000000000000000000016501314037446600305670ustar00rootroot00000000000000/** * machine_specific_memory_setup - Hook for machine specific memory setup. * * Description: * This is included late in kernel/setup.c so that it can make * use of all of the static functions. **/ static char * __init machine_specific_memory_setup(void) { char *who; who = "BIOS-e820"; /* * Try to copy the BIOS-supplied E820-map. * * Otherwise fake a memory map; one section from 0k->640k, * the next section from 1mb->appropriate_mem_k */ sanitize_e820_map(E820_MAP, &E820_MAP_NR); if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) { unsigned long mem_size; /* compare results from other methods and take the greater */ if (ALT_MEM_K < EXT_MEM_K) { mem_size = EXT_MEM_K; who = "BIOS-88"; } else { mem_size = ALT_MEM_K; who = "BIOS-e801"; } e820.nr_map = 0; add_memory_region(0, LOWMEMSIZE(), E820_RAM); add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); } return who; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/setup_arch_pre.h000066400000000000000000000001411314037446600303620ustar00rootroot00000000000000/* Hook to call BIOS initialisation function */ /* no action for generic */ #define ARCH_SETUP UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-default/smpboot_hooks.h000066400000000000000000000017651314037446600302620ustar00rootroot00000000000000/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws * which needs to alter them. 
*/ static inline void smpboot_clear_io_apic_irqs(void) { io_apic_irqs = 0; } static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) { CMOS_WRITE(0xa, 0xf); local_flush_tlb(); Dprintk("1.\n"); *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; Dprintk("2.\n"); *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; Dprintk("3.\n"); } static inline void smpboot_restore_warm_reset_vector(void) { /* * Install writable page 0 entry to set BIOS data area. */ local_flush_tlb(); /* * Paranoid: Set warm reset code and vector here back * to default values. */ CMOS_WRITE(0, 0xf); *((volatile long *) phys_to_virt(0x467)) = 0; } static inline void smpboot_setup_io_apic(void) { /* * Here we can be sure that there is an IO-APIC in the system. Let's * go and set it up: */ if (!skip_ioapic_setup && nr_ioapics) setup_IO_APIC(); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/000077500000000000000000000000001314037446600245045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_apic.h000066400000000000000000000114241314037446600265630ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H extern u8 bios_cpu_apicid[]; #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) #define esr_disable (1) static inline int apic_id_registered(void) { return (1); } static inline cpumask_t target_cpus(void) { #if defined CONFIG_ES7000_CLUSTERED_APIC return CPU_MASK_ALL; #else return cpumask_of_cpu(smp_processor_id()); #endif } #define TARGET_CPUS (target_cpus()) #if defined CONFIG_ES7000_CLUSTERED_APIC #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) #define INT_DELIVERY_MODE (dest_LowestPrio) #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ #define NO_BALANCE_IRQ (1) #undef WAKE_SECONDARY_VIA_INIT #define WAKE_SECONDARY_VIA_MIP #else #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define INT_DELIVERY_MODE (dest_Fixed) #define INT_DEST_MODE (0) /* phys delivery to target procs */ #define NO_BALANCE_IRQ (0) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL 0x0 #define WAKE_SECONDARY_VIA_INIT #endif static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } static inline unsigned long check_apicid_present(int bit) { return physid_isset(bit, phys_cpu_present_map); } #define apicid_cluster(apicid) (apicid & 0xF0) static inline unsigned long calculate_ldr(int cpu) { unsigned long id; id = xapic_phys_to_log_apicid(cpu); return (SET_APIC_LOGICAL_ID(id)); } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LdR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static inline void init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); apic_write_around(APIC_DFR, APIC_DFR_VALUE); val = calculate_ldr(cpu); apic_write_around(APIC_LDR, val); } extern void es7000_sw_apic(void); static inline void enable_apic_mode(void) { es7000_sw_apic(); return; } extern int apic_version [MAX_APICS]; static inline void clustered_apic_check(void) { int apic = bios_cpu_apicid[smp_processor_id()]; printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? 
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); } static inline int multi_timer_check(int apic, int irq) { return 0; } static inline int apicid_to_node(int logical_apicid) { return 0; } static inline int cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; else if (mps_cpu < NR_CPUS) return (int) bios_cpu_apicid[mps_cpu]; else return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { static int id = 0; physid_mask_t mask; mask = physid_mask_of_physid(id); ++id; return mask; } extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { if (cpu >= NR_CPUS) return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) { printk("Processor #%d %ld:%ld APIC version %d\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); return (m->mpc_apicid); } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0xff); } static inline void setup_portio_remap(void) { } extern unsigned int boot_cpu_physical_apicid; static inline int check_phys_apicid_present(int cpu_physical_apicid) { boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); return (1); } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; num_bits_set = cpus_weight(cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) #if defined CONFIG_ES7000_CLUSTERED_APIC return 0xFF; #else return cpu_to_logical_apicid(0); #endif /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ cpu = first_cpu(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { if (cpu_isset(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n",__FUNCTION__); #if defined CONFIG_ES7000_CLUSTERED_APIC return 0xFF; #else return cpu_to_logical_apicid(0); #endif } apicid = new_apicid; cpus_found++; } cpu++; } return apicid; } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_apicdef.h000066400000000000000000000003431314037446600272400ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #define APIC_ID_MASK (0xFF<<24) static inline unsigned get_apic_id(unsigned long x) { return (((x)>>24)&0xFF); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_ipi.h000066400000000000000000000010041314037446600264210ustar00rootroot00000000000000#ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H void send_IPI_mask_sequence(cpumask_t mask, int vector); static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } static inline void send_IPI_all(int vector) { send_IPI_mask(cpu_online_map, vector); } #endif /* __ASM_MACH_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_mpparse.h000066400000000000000000000026071314037446600273210ustar00rootroot00000000000000#ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H #include static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { } extern int parse_unisys_oem (char *oemptr); extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { if (mpc->mpc_oemptr) { struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)mpc->mpc_oemptr; if (!strncmp(oem, "UNISYS", 6)) return parse_unisys_oem((char *)oem_table); } return 0; } static inline int es7000_check_dsdt() { struct acpi_table_header *header = NULL; if(!acpi_get_table_header_early(ACPI_DSDT, &header)) acpi_table_print(header, 0); if (!strncmp(header->oem_id, "UNISYS", 6)) return 1; return 0; } /* Hook from generic ACPI tables.c */ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { unsigned long oem_addr; if (!find_unisys_acpi_oem_table(&oem_addr)) { if (es7000_check_dsdt()) return parse_unisys_oem((char *)oem_addr); else { setup_unisys(); return 1; } } return 0; } #endif /* __ASM_MACH_MPPARSE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_mpspec.h000066400000000000000000000002221314037446600271300ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 256 #define MAX_MP_BUSSES 256 #endif /* __ASM_MACH_MPSPEC_H */ 
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-es7000/mach_wakecpu.h000066400000000000000000000024411314037446600273050ustar00rootroot00000000000000#ifndef __ASM_MACH_WAKECPU_H #define __ASM_MACH_WAKECPU_H /* * This file copes with machines that wakeup secondary CPUs by the * INIT, INIT, STARTUP sequence. */ #ifdef CONFIG_ES7000_CLUSTERED_APIC #define WAKE_SECONDARY_VIA_MIP #else #define WAKE_SECONDARY_VIA_INIT #endif #ifdef WAKE_SECONDARY_VIA_MIP extern int es7000_start_cpu(int cpu, unsigned long eip); static inline int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) { int boot_error = 0; boot_error = es7000_start_cpu(phys_apicid, start_eip); return boot_error; } #endif #define TRAMPOLINE_LOW phys_to_virt(0x467) #define TRAMPOLINE_HIGH phys_to_virt(0x469) #define boot_cpu_apicid boot_cpu_physical_apicid static inline void wait_for_init_deassert(atomic_t *deassert) { #ifdef WAKE_SECONDARY_VIA_INIT while (!atomic_read(deassert)); #endif return; } /* Nothing to do for most platforms, since cleared by the INIT cycle */ static inline void smp_callin_clear_local_apic(void) { } static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { } static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { } #if APIC_DEBUG #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) #else #define inquire_remote_apic(apicid) {} #endif #endif /* __ASM_MACH_WAKECPU_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/000077500000000000000000000000001314037446600252025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/irq_vectors_limits.h000066400000000000000000000006231314037446600312750ustar00rootroot00000000000000#ifndef _ASM_IRQ_VECTORS_LIMITS_H #define _ASM_IRQ_VECTORS_LIMITS_H /* * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, * even with uni-proc kernels, so use a big array. * * This value should be the same in both the generic and summit subarches. * Change one, change 'em both. 
*/ #define NR_IRQS 224 #define NR_IRQ_VECTORS 1024 #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/mach_apic.h000066400000000000000000000026361314037446600272660ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #define esr_disable (genapic->ESR_DISABLE) #define NO_BALANCE_IRQ (genapic->no_balance_irq) #define INT_DELIVERY_MODE (genapic->int_delivery_mode) #define INT_DEST_MODE (genapic->int_dest_mode) #undef APIC_DEST_LOGICAL #define APIC_DEST_LOGICAL (genapic->apic_destination_logical) #define TARGET_CPUS (genapic->target_cpus()) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) #define ioapic_phys_id_map (genapic->ioapic_phys_id_map) #define clustered_apic_check (genapic->clustered_apic_check) #define multi_timer_check (genapic->multi_timer_check) #define apicid_to_node (genapic->apicid_to_node) #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) #define mpc_apic_id (genapic->mpc_apic_id) #define setup_portio_remap (genapic->setup_portio_remap) #define check_apicid_present (genapic->check_apicid_present) #define check_phys_apicid_present (genapic->check_phys_apicid_present) #define check_apicid_used (genapic->check_apicid_used) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) #define enable_apic_mode (genapic->enable_apic_mode) #define phys_pkg_id (genapic->phys_pkg_id) extern void generic_bigsmp_probe(void); #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/mach_apicdef.h000066400000000000000000000003341314037446600277360ustar00rootroot00000000000000#ifndef _GENAPIC_MACH_APICDEF_H #define _GENAPIC_MACH_APICDEF_H 1 #ifndef APIC_DEFINITION #include #define GET_APIC_ID (genapic->get_apic_id) #define APIC_ID_MASK (genapic->apic_id_mask) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/mach_ipi.h000066400000000000000000000003441314037446600271250ustar00rootroot00000000000000#ifndef _MACH_IPI_H #define _MACH_IPI_H 1 #include #define send_IPI_mask (genapic->send_IPI_mask) #define send_IPI_allbutself (genapic->send_IPI_allbutself) #define send_IPI_all (genapic->send_IPI_all) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/mach_mpparse.h000066400000000000000000000005071314037446600300140ustar00rootroot00000000000000#ifndef _MACH_MPPARSE_H #define _MACH_MPPARSE_H 1 #include #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) #define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); int acpi_madt_oem_check(char *oem_id, char *oem_table_id); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-generic/mach_mpspec.h000066400000000000000000000004411314037446600276310ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 256 /* Summit or generic (i.e. installer) kernels need lots of bus entries. */ /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. 
*/ #define MAX_MP_BUSSES 260 #endif /* __ASM_MACH_MPSPEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/000077500000000000000000000000001314037446600247075ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_apic.h000066400000000000000000000072041314037446600267670ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #include #include #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) static inline cpumask_t target_cpus(void) { return CPU_MASK_ALL; } #define TARGET_CPUS (target_cpus()) #define NO_BALANCE_IRQ (1) #define esr_disable (1) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ #define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) #define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) #define apicid_cluster(apicid) (apicid & 0xF0) static inline int apic_id_registered(void) { return 1; } static inline void init_apic_ldr(void) { /* Already done in NUMA-Q firmware */ } static inline void clustered_apic_check(void) { printk("Enabling APIC mode: %s. Using %d I/O APICs\n", "NUMA-Q", nr_ioapics); } /* * Skip adding the timer int on secondary nodes, which causes * a small but painful rift in the time-space continuum. */ static inline int multi_timer_check(int apic, int irq) { return apic != 0 && irq == 0; } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* We don't have a good way to do this yet - hack */ return physids_promote(0xFUL); } /* Mapping from cpu number to logical apicid */ extern u8 cpu_2_logical_apicid[]; static inline int cpu_to_logical_apicid(int cpu) { if (cpu >= NR_CPUS) return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; } /* * Supporting over 60 cpus on NUMA-Q requires a locality-dependent * cpu to APIC ID relation to properly interact with the intelligent * mode of the cluster controller. */ static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < 60) return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); else return BAD_APICID; } static inline int generate_logical_apicid(int quad, int phys_apicid) { return (quad << 4) + (phys_apicid ? 
phys_apicid << 1 : 1); } static inline int apicid_to_node(int logical_apicid) { return logical_apicid >> 4; } static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) { int node = apicid_to_node(logical_apicid); int cpu = __ffs(logical_apicid & 0xf); return physid_mask_of_physid(cpu + 4*node); } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { int quad = translation_record->trans_quad; int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver, quad, logical_apicid); return logical_apicid; } static inline void setup_portio_remap(void) { int num_quads = num_online_nodes(); if (num_quads <= 1) return; printk("Remapping cross-quad port I/O for %d quads\n", num_quads); xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); printk("xquad_portio vaddr 0x%08lx, len %08lx\n", (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); } static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return (1); } static inline void enable_apic_mode(void) { } /* * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us */ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return (int) 0xF; } /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_apicdef.h000066400000000000000000000003561314037446600274470ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #define APIC_ID_MASK (0xF<<24) static inline unsigned get_apic_id(unsigned long x) { return (((x)>>24)&0x0F); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_ipi.h000066400000000000000000000010001314037446600266200ustar00rootroot00000000000000#ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H void send_IPI_mask_sequence(cpumask_t, int vector); static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } static inline void send_IPI_all(int vector) { send_IPI_mask(cpu_online_map, vector); } #endif /* __ASM_MACH_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_mpparse.h000066400000000000000000000014501314037446600275170ustar00rootroot00000000000000#ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { int quad = translation->trans_quad; int local = translation->trans_local; mp_bus_id_to_node[m->mpc_busid] = quad; mp_bus_id_to_local[m->mpc_busid] = local; printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); } static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { int quad = translation->trans_quad; int local = translation->trans_local; quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; } /* Hook from 
generic ACPI tables.c */ static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { } #endif /* __ASM_MACH_MPPARSE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_mpspec.h000066400000000000000000000002211314037446600273320ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 512 #define MAX_MP_BUSSES 32 #endif /* __ASM_MACH_MPSPEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-numaq/mach_wakecpu.h000066400000000000000000000022401314037446600275050ustar00rootroot00000000000000#ifndef __ASM_MACH_WAKECPU_H #define __ASM_MACH_WAKECPU_H /* This file copes with machines that wakeup secondary CPUs by NMIs */ #define WAKE_SECONDARY_VIA_NMI #define TRAMPOLINE_LOW phys_to_virt(0x8) #define TRAMPOLINE_HIGH phys_to_virt(0xa) #define boot_cpu_apicid boot_cpu_logical_apicid /* We don't do anything here because we use NMI's to boot instead */ static inline void wait_for_init_deassert(atomic_t *deassert) { } /* * Because we use NMIs rather than the INIT-STARTUP sequence to * bootstrap the CPUs, the APIC may be in a weird state. Kick it. */ static inline void smp_callin_clear_local_apic(void) { clear_local_APIC(); } static inline void store_NMI_vector(unsigned short *high, unsigned short *low) { printk("Storing NMI vector\n"); *high = *((volatile unsigned short *) TRAMPOLINE_HIGH); *low = *((volatile unsigned short *) TRAMPOLINE_LOW); } static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { printk("Restoring NMI vector\n"); *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high; *((volatile unsigned short *) TRAMPOLINE_LOW) = *low; } #define inquire_remote_apic(apicid) {} #endif /* __ASM_MACH_WAKECPU_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/000077500000000000000000000000001314037446600251045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/irq_vectors_limits.h000066400000000000000000000006231314037446600311770ustar00rootroot00000000000000#ifndef _ASM_IRQ_VECTORS_LIMITS_H #define _ASM_IRQ_VECTORS_LIMITS_H /* * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, * even with uni-proc kernels, so use a big array. * * This value should be the same in both the generic and summit subarches. * Change one, change 'em both. */ #define NR_IRQS 224 #define NR_IRQ_VECTORS 1024 #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/mach_apic.h000066400000000000000000000112361314037446600271640ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #include #define esr_disable (1) #define NO_BALANCE_IRQ (0) /* In clustered mode, the high nibble of APIC ID is a cluster number. * The low nibble is a 4-bit bitmap. */ #define XAPIC_DEST_CPUS_SHIFT 4 #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) static inline cpumask_t target_cpus(void) { /* CPU_MASK_ALL (0xff) has undefined behaviour with * dest_LowestPrio mode logical clustered apic interrupt routing * Just start on cpu 0. 
IRQ balancing will spread load */ return cpumask_of_cpu(0); } #define TARGET_CPUS (target_cpus()) #define INT_DELIVERY_MODE (dest_LowestPrio) #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } /* we don't use the phys_cpu_present_map to indicate apicid presence */ static inline unsigned long check_apicid_present(int bit) { return 1; } #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) extern u8 bios_cpu_apicid[]; extern u8 cpu_2_logical_apicid[]; static inline void init_apic_ldr(void) { unsigned long val, id; int i, count; u8 lid; u8 my_id = (u8)hard_smp_processor_id(); u8 my_cluster = (u8)apicid_cluster(my_id); /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = NR_CPUS; --i >= 0; ) { lid = cpu_2_logical_apicid[i]; if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) ++count; } /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); id = my_cluster | (1UL << count); apic_write_around(APIC_DFR, APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); apic_write_around(APIC_LDR, val); } static inline int multi_timer_check(int apic, int irq) { return 0; } static inline int apic_id_registered(void) { return 1; } static inline void clustered_apic_check(void) { printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", nr_ioapics); } static inline int apicid_to_node(int logical_apicid) { return logical_apicid >> 5; /* 2 clusterids per CEC */ } /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { if (cpu >= NR_CPUS) return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) return (int)bios_cpu_apicid[mps_cpu]; else return BAD_APICID; } static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) { /* For clustered we don't have a good way to do this yet - hack */ return physids_promote(0x0F); } static inline physid_mask_t apicid_to_cpu_present(int apicid) { return physid_mask_of_physid(0); } static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) { printk("Processor #%d %ld:%ld APIC version %d\n", m->mpc_apicid, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); return (m->mpc_apicid); } static inline void setup_portio_remap(void) { } static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return 1; } static inline void enable_apic_mode(void) { } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; num_bits_set = cpus_weight(cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) return (int) 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ cpu = first_cpu(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { if (cpu_isset(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n",__FUNCTION__); return 0xFF; } apicid = apicid | new_apicid; cpus_found++; } cpu++; } return apicid; } /* cpuid returns the value latched in the HW at reset, not the APIC ID * register's value. For any box whose BIOS changes APIC IDs, like * clustered APIC systems, we must use hard_smp_processor_id. * * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. */ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return hard_smp_processor_id() >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/mach_apicdef.h000066400000000000000000000003431314037446600276400ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #define APIC_ID_MASK (0xFF<<24) static inline unsigned get_apic_id(unsigned long x) { return (((x)>>24)&0xFF); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/mach_ipi.h000066400000000000000000000010051314037446600270220ustar00rootroot00000000000000#ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H void send_IPI_mask_sequence(cpumask_t mask, int vector); static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } static inline void send_IPI_all(int vector) { send_IPI_mask(cpu_online_map, vector); } #endif /* __ASM_MACH_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/mach_mpparse.h000066400000000000000000000124151314037446600277170ustar00rootroot00000000000000#ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H #include extern int use_cyclone; #ifdef CONFIG_X86_SUMMIT_NUMA extern void setup_summit(void); #else #define setup_summit() {} #endif static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, struct mpc_config_translation *translation) { Dprintk("Bus #%d is %s\n", m->mpc_busid, name); } static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, struct mpc_config_translation *translation) { } static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) { if (!strncmp(oem, "IBM ENSW", 8) && (!strncmp(productid, "VIGIL SMP", 9) || !strncmp(productid, "EXA", 3) || !strncmp(productid, "RUTHLESS SMP", 12))){ use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; } return 0; } /* Hook from generic ACPI tables.c */ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERVIGIL", 8) || !strncmp(oem_table_id, "EXA", 3))){ use_cyclone = 1; /*enable cyclone-timer*/ setup_summit(); return 1; } return 0; } struct rio_table_hdr { unsigned char version; /* Version number of this data structure */ /* Version 3 adds chassis_num & WP_index */ unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ } __attribute__((packed)); struct scal_detail { unsigned char node_id; /* Scalability Node ID */ unsigned long CBAR; /* Address of 1MB register space */ unsigned char 
port0node; /* Node ID port connected to: 0xFF=None */ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port1node; /* Node ID port connected to: 0xFF = None */ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port2node; /* Node ID port connected to: 0xFF = None */ unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ } __attribute__((packed)); struct rio_detail { unsigned char node_id; /* RIO Node ID */ unsigned long BBAR; /* Address of 1MB register space */ unsigned char type; /* Type of device */ unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ /* For CYC: Node ID of Twister that owns this CYC */ unsigned char port0node; /* Node ID port connected to: 0xFF=None */ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char port1node; /* Node ID port connected to: 0xFF=None */ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ /* For CYC: 0 */ unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ /* = 0 : the XAPIC is not used, ie:*/ /* ints fwded to another XAPIC */ /* Bits1:7 Reserved */ /* For CYC: Bits0:7 Reserved */ unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ /* lower slot numbers/PCI bus numbers */ /* For CYC: No meaning */ unsigned char chassis_num; /* 1 based Chassis number */ /* For LookOut WPEGs this field indicates the */ /* Expansion Chassis #, enumerated from Boot */ /* Node WPEG external port, then Boot Node CYC */ /* external port, then Next Vigil chassis WPEG */ /* external port, etc. */ /* Shared Lookouts have only 1 chassis number (the */ /* first one assigned) */ } __attribute__((packed)); typedef enum { CompatTwister = 0, /* Compatibility Twister */ AltTwister = 1, /* Alternate Twister of internal 8-way */ CompatCyclone = 2, /* Compatibility Cyclone */ AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ CompatWPEG = 4, /* Compatibility WPEG */ AltWPEG = 5, /* Second Planar WPEG */ LookOutAWPEG = 6, /* LookOut WPEG */ LookOutBWPEG = 7, /* LookOut WPEG */ } node_type; static inline int is_WPEG(struct rio_detail *rio){ return (rio->type == CompatWPEG || rio->type == AltWPEG || rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); } #endif /* __ASM_MACH_MPPARSE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-summit/mach_mpspec.h000066400000000000000000000003261314037446600275350ustar00rootroot00000000000000#ifndef __ASM_MACH_MPSPEC_H #define __ASM_MACH_MPSPEC_H #define MAX_IRQ_SOURCES 256 /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. 
*/ #define MAX_MP_BUSSES 260 #endif /* __ASM_MACH_MPSPEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/000077500000000000000000000000001314037446600247415ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/cobalt.h000066400000000000000000000067661314037446600263750ustar00rootroot00000000000000#ifndef __I386_SGI_COBALT_H #define __I386_SGI_COBALT_H #include /* * Cobalt SGI Visual Workstation system ASIC */ #define CO_CPU_NUM_PHYS 0x1e00 #define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2) #define CO_CPU_MAX 4 #define CO_CPU_PHYS 0xc2000000 #define CO_APIC_PHYS 0xc4000000 /* see set_fixmap() and asm/fixmap.h */ #define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU)) #define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC)) /* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */ #define CO_CPU_REV 0x08 #define CO_CPU_CTRL 0x10 #define CO_CPU_STAT 0x20 #define CO_CPU_TIMEVAL 0x30 /* CO_CPU_CTRL bits */ #define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */ #define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */ /* CO_CPU_STATUS bits */ #define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */ /* CO_CPU_TIMEVAL value */ #define CO_TIME_HZ 100000000 /* Cobalt core rate */ /* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */ #define CO_APIC_HI(n) (((n) * 0x10) + 4) #define CO_APIC_LO(n) ((n) * 0x10) #define CO_APIC_ID 0x0ffc /* CO_APIC_ID bits */ #define CO_APIC_ENABLE 0x00000100 /* CO_APIC_LO bits */ #define CO_APIC_MASK 0x00010000 /* 0 = enabled */ #define CO_APIC_LEVEL 0x00008000 /* 0 = edge */ /* * Where things are physically wired to Cobalt * #defines with no board ___ are common to all (thus far) */ #define CO_APIC_IDE0 4 #define CO_APIC_IDE1 2 /* Only on 320 */ #define CO_APIC_8259 12 /* serial, floppy, par-l-l */ /* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */ #define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */ #define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */ #define CO_APIC_PIIX4_USB 7 /* this one is weird */ /* Lithium PCI Bridge B -- "the one with PIIX4" */ #define CO_APIC_PCIB_BASE0 8 /* and 9-12 *//* slot 0, line 0 */ #define CO_APIC_PCIB_BASE123 13 /* 14.15 */ /* slot 0, line 1 */ #define CO_APIC_VIDOUT0 16 #define CO_APIC_VIDOUT1 17 #define CO_APIC_VIDIN0 18 #define CO_APIC_VIDIN1 19 #define CO_APIC_LI_AUDIO 22 #define CO_APIC_AS 24 #define CO_APIC_RE 25 #define CO_APIC_CPU 28 /* Timer and Cache interrupt */ #define CO_APIC_NMI 29 #define CO_APIC_LAST CO_APIC_NMI /* * This is how irqs are assigned on the Visual Workstation. * Legacy devices get irq's 1-15 (system clock is 0 and is CO_APIC_CPU). * All other devices (including PCI) go to Cobalt and are irq's 16 on up. */ #define CO_IRQ_APIC0 16 /* irq of apic entry 0 */ #define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0) #define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */ #define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */ #define CO_IRQ_IDE0 14 /* knowledge of... */ #define CO_IRQ_IDE1 15 /* ... ide driver defaults! 
*/ #define CO_IRQ_8259 CO_IRQ(CO_APIC_8259) #ifdef CONFIG_X86_VISWS_APIC extern __inline void co_cpu_write(unsigned long reg, unsigned long v) { *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v; } extern __inline unsigned long co_cpu_read(unsigned long reg) { return *((volatile unsigned long *)(CO_CPU_VADDR+reg)); } extern __inline void co_apic_write(unsigned long reg, unsigned long v) { *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v; } extern __inline unsigned long co_apic_read(unsigned long reg) { return *((volatile unsigned long *)(CO_APIC_VADDR+reg)); } #endif extern char visws_board_type; #define VISWS_320 0 #define VISWS_540 1 extern char visws_board_rev; #endif /* __I386_SGI_COBALT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/do_timer.h000066400000000000000000000023641314037446600267210ustar00rootroot00000000000000/* defines for inline arch setup functions */ #include #include #include "cobalt.h" static inline void do_timer_interrupt_hook(struct pt_regs *regs) { /* Clear the interrupt */ co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR); do_timer(regs); #ifndef CONFIG_SMP update_process_times(user_mode(regs)); #endif /* * In the SMP case we use the local APIC timer interrupt to do the * profiling, except when we simulate SMP mode on a uniprocessor * system, in that case we have to call the local interrupt handler. */ #ifndef CONFIG_X86_LOCAL_APIC profile_tick(CPU_PROFILING, regs); #else if (!using_apic_timer) smp_local_timer_interrupt(regs); #endif } static inline int do_timer_overflow(int count) { int i; spin_lock(&i8259A_lock); /* * This is tricky when I/O APICs are used; * see do_timer_interrupt(). */ i = inb(0x20); spin_unlock(&i8259A_lock); /* assumption about timer being IRQ0 */ if (i & 0x01) { /* * We cannot detect lost timer interrupts ... * well, that's why we call them lost, don't we? :) * [hmm, on the Pentium and Alpha we can ... sort of] */ count -= LATCH; } else { printk("do_slow_gettimeoffset(): hardware timer problem?\n"); } return count; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/entry_arch.h000066400000000000000000000015551314037446600272560ustar00rootroot00000000000000/* * The following vectors are part of the Linux architecture, there * is no hardware IRQ pin equivalent for them, they are triggered * through the ICC by us (IPIs) */ #ifdef CONFIG_X86_SMP BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) #endif /* * every pentium local APIC has two 'local interrupts', with a * soft-definable vector attached to both interrupts, one of * which is a timer interrupt, the other one is error counter * overflow. Linux uses the local APIC timer interrupt to get * a much simpler SMP time architecture: */ #ifdef CONFIG_X86_LOCAL_APIC BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/irq_vectors.h000066400000000000000000000027631314037446600274620ustar00rootroot00000000000000#ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. 
*/ /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. * * Vectors 0xf0-0xfa are free (reserved for future Linux use). */ #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xf0 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef #define TIMER_IRQ 0 /* * IRQ definitions */ #define NR_VECTORS 256 #define NR_IRQS 224 #define NR_IRQ_VECTORS NR_IRQS #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/lithium.h000066400000000000000000000024521314037446600265700ustar00rootroot00000000000000#ifndef __I386_SGI_LITHIUM_H #define __I386_SGI_LITHIUM_H #include /* * Lithium is the SGI Visual Workstation I/O ASIC */ #define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */ #define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */ /* see set_fixmap() and asm/fixmap.h */ #define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA)) #define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB)) /* Not a standard PCI? (not in linux/pci.h) */ #define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */ #define LI_PCI_INTEN 0x46 /* LI_PCI_INTENT bits */ #define LI_INTA_0 0x0001 #define LI_INTA_1 0x0002 #define LI_INTA_2 0x0004 #define LI_INTA_3 0x0008 #define LI_INTA_4 0x0010 #define LI_INTB 0x0020 #define LI_INTC 0x0040 #define LI_INTD 0x0080 /* More special purpose macros... */ extern __inline void li_pcia_write16(unsigned long reg, unsigned short v) { *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v; } extern __inline unsigned short li_pcia_read16(unsigned long reg) { return *((volatile unsigned short *)(LI_PCIA_VADDR+reg)); } extern __inline void li_pcib_write16(unsigned long reg, unsigned short v) { *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v; } extern __inline unsigned short li_pcib_read16(unsigned long reg) { return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/mach_apic.h000066400000000000000000000041441314037446600270210ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #include #define APIC_DFR_VALUE (APIC_DFR_FLAT) #define no_balance_irq (0) #define esr_disable (0) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ #ifdef CONFIG_SMP #define TARGET_CPUS cpu_online_map #else #define TARGET_CPUS cpumask_of_cpu(0) #endif #define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) #define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) static inline int apic_id_registered(void) { return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); } /* * Set up the logical destination ID. 
* * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static inline void init_apic_ldr(void) { unsigned long val; apic_write_around(APIC_DFR, APIC_DFR_VALUE); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); apic_write_around(APIC_LDR, val); } static inline void summit_check(char *oem, char *productid) { } static inline void clustered_apic_check(void) { } /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { return 1 << cpu; } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < get_physical_broadcast()) return mps_cpu; else return BAD_APICID; } static inline physid_mask_t apicid_to_cpu_present(int apicid) { return physid_mask_of_physid(apicid); } #define WAKE_SECONDARY_VIA_INIT static inline void setup_portio_remap(void) { } static inline void enable_apic_mode(void) { } static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return cpus_addr(cpumask)[0]; } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/mach_apicdef.h000066400000000000000000000003761314037446600275030ustar00rootroot00000000000000#ifndef __ASM_MACH_APICDEF_H #define __ASM_MACH_APICDEF_H #define APIC_ID_MASK (0xF<<24) static inline unsigned get_apic_id(unsigned long x) { return (((x)>>24)&0xF); } #define GET_APIC_ID(x) get_apic_id(x) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/mach_hooks.h000066400000000000000000000006111314037446600272230ustar00rootroot00000000000000#ifndef _MACH_HOOKS_H #define _MACH_HOOKS_H #define pre_intr_init_hook() init_VISWS_APIC_irqs() extern void visws_get_board_type_and_rev(void); #define pre_setup_arch_hook() visws_get_board_type_and_rev() extern void visws_time_init_hook(void); #define time_init_hook() visws_time_init_hook() extern void visws_trap_init_hook(void); #define trap_init_hook() visws_trap_init_hook() #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/piix4.h000066400000000000000000000050251314037446600261510ustar00rootroot00000000000000#ifndef __I386_SGI_PIIX_H #define __I386_SGI_PIIX_H /* * PIIX4 as used on SGI Visual Workstations */ #define PIIX_PM_START 0x0F80 #define SIO_GPIO_START 0x0FC0 #define SIO_PM_START 0x0FC8 #define PMBASE PIIX_PM_START #define GPIREG0 (PMBASE+0x30) #define GPIREG(x) (GPIREG0+((x)/8)) #define GPIBIT(x) (1 << ((x)%8)) #define PIIX_GPI_BD_ID1 18 #define PIIX_GPI_BD_ID2 19 #define PIIX_GPI_BD_ID3 20 #define PIIX_GPI_BD_ID4 21 #define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1) #define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \ GPIBIT(PIIX_GPI_BD_ID2) | \ GPIBIT(PIIX_GPI_BD_ID3) | \ GPIBIT(PIIX_GPI_BD_ID4) ) #define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8) #define SIO_INDEX 0x2e #define SIO_DATA 0x2f #define SIO_DEV_SEL 0x7 #define SIO_DEV_ENB 0x30 #define SIO_DEV_MSB 0x60 #define SIO_DEV_LSB 0x61 #define SIO_GP_DEV 0x7 #define SIO_GP_BASE SIO_GPIO_START #define SIO_GP_MSB (SIO_GP_BASE>>8) #define SIO_GP_LSB (SIO_GP_BASE&0xff) #define SIO_GP_DATA1 (SIO_GP_BASE+0) #define SIO_PM_DEV 0x8 #define SIO_PM_BASE SIO_PM_START #define SIO_PM_MSB (SIO_PM_BASE>>8) #define SIO_PM_LSB 
(SIO_PM_BASE&0xff) #define SIO_PM_INDEX (SIO_PM_BASE+0) #define SIO_PM_DATA (SIO_PM_BASE+1) #define SIO_PM_FER2 0x1 #define SIO_PM_GP_EN 0x80 /* * This is the dev/reg where generating a config cycle will * result in a PCI special cycle. */ #define SPECIAL_DEV 0xff #define SPECIAL_REG 0x00 /* * PIIX4 needs to see a special cycle with the following data * to be convinced the processor has gone into the stop grant * state. PIIX4 insists on seeing this before it will power * down a system. */ #define PIIX_SPECIAL_STOP 0x00120002 #define PIIX4_RESET_PORT 0xcf9 #define PIIX4_RESET_VAL 0x6 #define PMSTS_PORT 0xf80 // 2 bytes PM Status #define PMEN_PORT 0xf82 // 2 bytes PM Enable #define PMCNTRL_PORT 0xf84 // 2 bytes PM Control #define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state /* * PMSTS and PMEN I/O bit definitions. * (Bits are the same in both registers) */ #define PM_STS_RSM (1<<15) // Resume Status #define PM_STS_PWRBTNOR (1<<11) // Power Button Override #define PM_STS_RTC (1<<10) // RTC status #define PM_STS_PWRBTN (1<<8) // Power Button Pressed? #define PM_STS_GBL (1<<5) // Global Status #define PM_STS_BM (1<<4) // Bus Master Status #define PM_STS_TMROF (1<<0) // Timer Overflow Status. /* * Stop clock GPI register */ #define PIIX_GPIREG0 (0xf80 + 0x30) /* * Stop clock GPI bit in GPIREG0 */ #define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/setup_arch_post.h000066400000000000000000000023001314037446600303070ustar00rootroot00000000000000/* Hook for machine specific memory setup. * * This is included late in kernel/setup.c so that it can make use of all of * the static functions. */ #define MB (1024 * 1024) unsigned long sgivwfb_mem_phys; unsigned long sgivwfb_mem_size; long long mem_size __initdata = 0; static char * __init machine_specific_memory_setup(void) { long long gfx_mem_size = 8 * MB; mem_size = ALT_MEM_K; if (!mem_size) { printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n"); mem_size = 128 * MB; } /* * this hardcodes the graphics memory to 8 MB * it really should be sized dynamically (or at least * set as a boot param) */ if (!sgivwfb_mem_size) { printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n"); sgivwfb_mem_size = 8 * MB; } /* * Trim to nearest MB */ sgivwfb_mem_size &= ~((1 << 20) - 1); sgivwfb_mem_phys = mem_size - gfx_mem_size; add_memory_region(0, LOWMEMSIZE(), E820_RAM); add_memory_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM); add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED); return "PROM"; /* Remove gcc warnings */ (void) sanitize_e820_map(NULL, NULL); (void) copy_e820_map(NULL, 0); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/setup_arch_pre.h000066400000000000000000000001371314037446600301160ustar00rootroot00000000000000/* Hook to call BIOS initialisation function */ /* no action for visws */ #define ARCH_SETUP UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-visws/smpboot_hooks.h000066400000000000000000000010171314037446600277770ustar00rootroot00000000000000static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) { CMOS_WRITE(0xa, 0xf); local_flush_tlb(); Dprintk("1.\n"); *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; Dprintk("2.\n"); *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; Dprintk("3.\n"); } /* for visws do nothing for any of these */ static inline void smpboot_clear_io_apic_irqs(void) 
{ } static inline void smpboot_restore_warm_reset_vector(void) { } static inline void smpboot_setup_io_apic(void) { } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/000077500000000000000000000000001314037446600243615ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/entry_arch.h000066400000000000000000000023321314037446600266700ustar00rootroot00000000000000/* * This file is designed to contain the BUILD_INTERRUPT specifications for * all of the extra named interrupt vectors used by the architecture. * Usually this is the Inter Process Interrupts (IPIs) */ /* * The following vectors are part of the Linux architecture, there * is no hardware IRQ pin equivalent for them, they are triggered * through the ICC by us (IPIs) */ #ifdef CONFIG_X86_SMP BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) #endif /* * every pentium local APIC has two 'local interrupts', with a * soft-definable vector attached to both interrupts, one of * which is a timer interrupt, the other one is error counter * overflow. Linux uses the local APIC timer interrupt to get * a much simpler SMP time architecture: */ #ifdef CONFIG_X86_LOCAL_APIC BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(apic_vmi_timer_interrupt,LOCAL_TIMER_VECTOR) BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) #ifdef CONFIG_X86_MCE_P4THERMAL BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_apicops.h000066400000000000000000000042161314037446600271630ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to arai@vmware.com * */ /* * VMI implementation for basic functions accessing APICs. Calls into VMI * to perform operations. 
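 *
 * Illustrative use (an assumption about the caller, not original text):
 * an interrupt-acknowledge path that would normally store to the local
 * APIC ends up here as
 *
 *	apic_write(APIC_EOI, 0);
 *
 * and the wrapper below turns that store into a VMI APICWrite hypercall
 * rather than a direct access to the APIC page.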
*/ #ifndef __ASM_MACH_APICOPS_H #if defined(CONFIG_VMI_APIC_HYPERCALLS) && defined(CONFIG_X86_LOCAL_APIC) #define __ASM_MACH_APICOPS_H #include static inline void apic_write(unsigned long reg, unsigned long value) { void *addr = (void *)(APIC_BASE + reg); vmi_wrap_call( APICWrite, "movl %1, (%0)", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(addr), VMI_IREG2(value)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } /* * Workaround APIC errata with serialization by using locked instruction; * See Pentium erratum 11AP */ static inline void apic_write_atomic(unsigned long reg, unsigned long value) { void *addr = (void *)(APIC_BASE + reg); vmi_wrap_call( APICWrite, "xchgl %1, (%0)", /* Actually, there is unused output in %edx, clobbered below */ VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(addr), VMI_IREG2(value)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline unsigned long apic_read(unsigned long reg) { unsigned long value; void *addr = (void *)(APIC_BASE + reg); vmi_wrap_call( APICRead, "movl (%0), %%eax", VMI_OREG1(value), 1,VMI_IREG1(addr), VMI_CLOBBER(ONE_RETURN)); return value; } #else #include <../mach-default/mach_apicops.h> #endif #endif /* __ASM_MACH_APICOPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_apictimer.h000066400000000000000000000027621314037446600275060ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to dhecht@vmware.com * */ /* * Initiation routines related to the APIC timer. */ #ifndef __ASM_MACH_APICTIMER_H #define __ASM_MACH_APICTIMER_H #ifdef CONFIG_X86_LOCAL_APIC extern int using_apic_timer; extern void __init setup_boot_vmi_alarm(void); extern void __devinit setup_secondary_vmi_alarm(void); extern int __init vmi_timer_irq_works(void); extern int __init timer_irq_works(void); static inline void mach_setup_boot_local_clock(void) { setup_boot_vmi_alarm(); } static inline void mach_setup_secondary_local_clock(void) { setup_secondary_vmi_alarm(); } static inline int mach_timer_irq_works(void) { return vmi_timer_irq_works(); } #endif /* CONFIG_X86_LOCAL_APIC */ #endif /* __ASM_MACH_APICTIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_asm.h000066400000000000000000000142341314037446600263060ustar00rootroot00000000000000#ifndef __MACH_ASM_H #define __MACH_ASM_H /* * Take care to ensure that these labels do not overlap any in parent scope, * as the breakage can be difficult to detect. I haven't found a flawless * way to avoid collision because local \@ can not be saved for a future * macro definitions, and we are stuck running cpp -traditional on entry.S * * To work around gas bugs, we must emit the native sequence here into a * separate section first to measure the length. 
Some versions of gas have * difficulty resolving vmi_native_end - vmi_native_begin during evaluation * of an assembler conditional, which we use during the .rept directive * below to generate the nop padding -- Zach */ /* First, measure the native instruction sequence length */ #define vmi_native_start \ .pushsection .vmi.native,"",@nobits; \ 771:; #define vmi_native_finish \ 772:; \ .popsection; #define vmi_native_begin 771b #define vmi_native_end 772b #define vmi_native_len (vmi_native_end - vmi_native_begin) /* Now, measure and emit the vmi translation sequence */ #define vmi_translation_start \ .pushsection .vmi.translation,"ax"; \ 781:; #define vmi_translation_finish \ 782:; \ .popsection; #define vmi_translation_begin 781b #define vmi_translation_end 782b #define vmi_translation_len (vmi_translation_end - vmi_translation_begin) /* Finally, emit the padded native sequence */ #define vmi_padded_start \ 791:; #define vmi_padded_finish \ 792:; #define vmi_padded_begin 791b #define vmi_padded_end 792b #define vmi_padded_len (vmi_padded_end - vmi_padded_begin) /* Measure the offset of the call instruction in the translation */ #define vmi_callout 761: call .+5; #define vmi_call_eip 761b #define vmi_call_offset (vmi_call_eip - vmi_translation_begin) /* * Pad out the current native instruction sequence with a series of nops; * the nop used is 0x66* 0x90 which is data16 nop or xchg %ax, %ax. We * pad out with up to 11 bytes of prefix at a time because beyond that * both AMD and Intel processors start to show inefficiencies. */ #define MNEM_OPSIZE 0x66 #define MNEM_NOP 0x90 #define VMI_NOP_MAX 12 #define vmi_nop_pad \ .equ vmi_pad_total, vmi_translation_len - vmi_native_len; \ .equ vmi_pad, vmi_pad_total; \ .rept (vmi_pad+VMI_NOP_MAX-1)/VMI_NOP_MAX; \ .if vmi_pad > VMI_NOP_MAX; \ .equ vmi_cur_pad, VMI_NOP_MAX; \ .else; \ .equ vmi_cur_pad, vmi_pad; \ .endif; \ .if vmi_cur_pad > 1; \ .fill vmi_cur_pad-1, 1, MNEM_OPSIZE; \ .endif; \ .byte MNEM_NOP; \ .equ vmi_pad, vmi_pad - vmi_cur_pad; \ .endr; /* * Create an annotation for a VMI call; the VMI call currently must be * wrapped in one of the vmi_raw_call (for assembler) or one of the * family of defined wrappers for C code. */ #define vmi_annotate(name) \ .pushsection .vmi.annotation,"a"; \ .align 4; \ .long name; \ .long vmi_padded_begin; \ .long vmi_translation_begin; \ .byte vmi_padded_len; \ .byte vmi_translation_len; \ .byte vmi_pad_total; \ .byte vmi_call_offset; \ .popsection; #ifndef __ASSEMBLY__ struct vmi_annotation { unsigned long vmi_call; unsigned char *nativeEIP; unsigned char *translationEIP; unsigned char native_size; unsigned char translation_size; signed char nop_size; unsigned char call_offset; }; extern struct vmi_annotation __vmi_annotation[], __vmi_annotation_end[]; #endif #define vmi_raw_call(name, native) \ vmi_native_start; \ native; \ vmi_native_finish; \ \ vmi_translation_start; \ vmi_callout; \ vmi_translation_finish; \ \ vmi_padded_start; \ native; \ vmi_nop_pad; \ vmi_padded_finish; \ \ vmi_annotate(name); #include #ifdef __ASSEMBLY__ /* * Create VMI_CALL_FuncName definitions for assembly code using * equates; the C enumerations can not be used without propagating * them in some fashion, and rather the obfuscate asm-offsets.c, it * seems reasonable to confine this here. 
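 *
 * For example (illustrative expansion, not original text): the first
 * entry of VMI_CALLS, VDEF(CPUID), assembles to
 *
 *	.equ VMI_CALL_CPUID, 0
 *	.equ VMI_CALL_CUR, 1
 *
 * and every later VDEF() takes the next number, mirroring the C
 * enumeration generated below for non-assembly users.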
*/ .equ VMI_CALL_CUR, 0; #define VDEF(call) \ .equ VMI_CALL_/**/call, VMI_CALL_CUR; \ .equ VMI_CALL_CUR, VMI_CALL_CUR+1; VMI_CALLS #undef VDEF #endif /* __ASSEMBLY__ */ #define IRET vmi_raw_call(VMI_CALL_IRET, iret) #define CLI vmi_raw_call(VMI_CALL_DisableInterrupts, cli) #define STI vmi_raw_call(VMI_CALL_EnableInterrupts, sti) #define STI_SYSEXIT vmi_raw_call(VMI_CALL_SYSEXIT, sti; sysexit) /* * Due to the presence of "," in the instruction, and the use of * -traditional to compile entry.S, we can not use a macro to * encapsulate (mov %cr0, %eax); the full expansion must be * written. */ #define GET_CR0 vmi_native_start; \ mov %cr0, %eax; \ vmi_native_finish; \ vmi_translation_start; \ vmi_callout; \ vmi_translation_finish; \ vmi_padded_start; \ mov %cr0, %eax; \ vmi_nop_pad; \ vmi_padded_finish; \ vmi_annotate(VMI_CALL_GetCR0); #ifndef __ASSEMBLY__ /* * Several handy macro definitions used to convert the raw assembler * definitions here into quoted strings for use in inline assembler * from C code. * * To convert the value of a defined token to a string, XSTR(x) * To concatenate multiple parameters separated by commas, VMI_XCONC() * To convert the value of a defined value with commas, XCSTR() * * These macros are incompatible with -traditional */ #define MAKESTR(x) #x #define XSTR(x) MAKESTR(x) #define VMI_XCONC(args...) args #define CONCSTR(x...) #x #define XCSTR(x...) CONCSTR(x) /* * Create a typedef to define the VMI call enumeration. */ #define VDEF(call) VMI_CALL_##call, typedef enum VMICall { VMI_CALLS NUM_VMI_CALLS } VMICall; #undef VDEF /* Absolute Hack */ asm(".equ VMI_CALL_EnableInterrupts, 29;\n\t"); asm(".equ VMI_CALL_DisableInterrupts, 30;\n\t"); /* * Sti and Cli are special cases, used in raw inline assembler strings; * they do not need much of the machinery provided by C-code to do type * checking or push arguments onto the stack, which means we can simply * quote the assembler versions defined above rather than try to pry * apart the call sites which use these raw strings. */ #define CLI_STRING XCSTR(CLI) #define STI_STRING XCSTR(STI) #endif /* !__ASSEMBLY__ */ #endif /* __MACH_ASM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_desc.h000066400000000000000000000105761314037446600264510ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #ifndef __MACH_DESC_H #define __MACH_DESC_H #include #if !defined(CONFIG_X86_VMI) # error invalid sub-arch include #endif static inline void load_gdt(struct Xgt_desc_struct *const dtr) { vmi_wrap_call( SetGDT, "lgdt (%0)", VMI_NO_OUTPUT, 1, VMI_IREG1 (dtr), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void load_idt(struct Xgt_desc_struct *const dtr) { vmi_wrap_call( SetIDT, "lidt (%0)", VMI_NO_OUTPUT, 1, VMI_IREG1 (dtr), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void load_ldt(const u16 sel) { vmi_wrap_call( SetLDT, "lldt %w0", VMI_NO_OUTPUT, 1, VMI_IREG1 (sel), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void load_tr(const u16 sel) { vmi_wrap_call( SetTR, "ltr %w0", VMI_NO_OUTPUT, 1, VMI_IREG1 (sel), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void store_gdt(struct Xgt_desc_struct *const dtr) { vmi_wrap_call( GetGDT, "sgdt (%0)", VMI_NO_OUTPUT, 1, VMI_IREG1 (dtr), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void store_idt(struct Xgt_desc_struct *const dtr) { vmi_wrap_call(GetIDT, "sidt (%0)", VMI_NO_OUTPUT, 1, VMI_IREG1 (dtr), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline unsigned vmi_get_ldt(void) { unsigned ret; vmi_wrap_call( GetLDT, "sldt %%ax", VMI_OREG1 (ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } static inline unsigned vmi_get_tr(void) { unsigned ret; vmi_wrap_call( GetTR, "str %%ax", VMI_OREG1 (ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } #define load_TR_desc() load_tr(GDT_ENTRY_TSS*8) #define load_LDT_desc() load_ldt(GDT_ENTRY_LDT*8) #define clear_LDT_desc() load_ldt(0) #define store_tr(tr) do { (tr) = vmi_get_tr(); } while (0) #define store_ldt(ldt) do { (ldt) = vmi_get_ldt(); } while (0) static inline void vmi_write_gdt(void *gdt, unsigned entry, u32 descLo, u32 descHi) { vmi_wrap_call( /* * We want to write a two word descriptor to GDT entry * number (%1) offset from *gdt (%0), so using 8 byte * indexing, this can be expressed compactly as * (gdt,entry,8) and 4(gdt,entry,8) */ WriteGDTEntry, "movl %2, (%0,%1,8);" "movl %3, 4(%0,%1,8);", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(gdt), VMI_IREG2(entry), VMI_IREG3(descLo), VMI_IREG4(descHi)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } extern void vmi_write_ldt(void *ldt, unsigned entry, u32 descLo, u32 descHi); static inline void vmi_write_idt(void *idt, unsigned entry, u32 descLo, u32 descHi) { vmi_wrap_call( WriteIDTEntry, "movl %2, (%0,%1,8);" "movl %3, 4(%0,%1,8);", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(idt), VMI_IREG2(entry), VMI_IREG3(descLo), VMI_IREG4(descHi)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { unsigned i; struct desc_struct *gdt = get_cpu_gdt_table(cpu); for (i = 0; i < TLS_SIZE / sizeof(struct desc_struct); i++) { unsigned cur = i + GDT_ENTRY_TLS_MIN; if (gdt[cur].a != t->tls_array[i].a || gdt[cur].b != t->tls_array[i].b) { vmi_write_gdt(gdt, cur, t->tls_array[i].a, t->tls_array[i].b); } } } static inline void write_gdt_entry(void *gdt, int entry, u32 entry_a, u32 entry_b) { vmi_write_gdt(gdt, entry, entry_a, entry_b); } static inline void write_ldt_entry(void *ldt, int entry, u32 entry_a, u32 entry_b) { vmi_write_ldt(ldt, entry, entry_a, entry_b); } static inline void write_idt_entry(void *idt, int entry, u32 entry_a, u32 entry_b) { vmi_write_idt(idt, entry, entry_a, entry_b); } #endif 
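/*
 * Illustrative usage of the descriptor wrappers above (editorial sketch,
 * not part of the original header): rewriting one GDT slot passes the two
 * 32-bit halves of the 8-byte descriptor, exactly as load_TLS() does for
 * the TLS entries:
 *
 *	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 *	write_gdt_entry(gdt, GDT_ENTRY_TLS_MIN + i,
 *			t->tls_array[i].a, t->tls_array[i].b);
 */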
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_hooks.h000066400000000000000000000036441314037446600266540ustar00rootroot00000000000000#ifndef MACH_HOOKS_H #define MACH_HOOKS_H #include #include #include #include /** * pre_setup_arch_hook - hook called prior to any setup_arch() execution * * Description: * generally used to activate any machine specific identification * routines that may be needed before setup_arch() runs. * We probe for various component option ROMs here. **/ extern void vmi_init(void); #define pre_setup_arch_hook() vmi_init() extern void vmi_module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod); #define module_finalize_hook vmi_module_finalize /** * smpboot_startup_ipi_hook - hook to set AP state prior to startup IPI * * Description: * Used in VMI to allow hypervisors to setup a known initial state on * coprocessors, rather than booting from real mode. **/ extern void vmi_ap_start_of_day(int phys_apicid, unsigned long start_eip, unsigned long start_esp); #define smpboot_startup_ipi_hook(apicid, eip, esp) \ vmi_ap_start_of_day(apicid, eip, esp) #ifdef CONFIG_X86_LOCAL_APIC extern fastcall void apic_vmi_timer_interrupt(void); #endif static inline void vmi_intr_init_hook(void) { #ifdef CONFIG_X86_LOCAL_APIC /* if the VMI-timer is used, redirect the local APIC timer interrupt * gate to point to the vmi interrupt handler. */ if (vmi_timer_used()) set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt); #endif legacy_intr_init_hook(); } #define intr_init_hook() vmi_intr_init_hook() /** * time_init_hook - do any specific initialisations for the system timer. * * Description: * Must plug the system timer interrupt source at HZ into the IRQ listed * in irq_vectors.h:TIMER_IRQ **/ extern struct irqaction vmi_irq0; static inline void vmi_time_init_hook(void) { if (vmi_timer_used()) setup_irq(0, &vmi_irq0); else default_timer_init(); } #define time_init_hook vmi_time_init_hook #endif /* MACH_HOOKS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_idletimer.h000066400000000000000000000033351314037446600275040ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to dhecht@vmware.com * */ /* * NO_IDLE_HZ callbacks. 
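 *
 * Illustrative pairing (an assumption about the callers, not text from
 * this tree): the idle path is expected to call stop_hz_timer() before
 * the vcpu halts, and restart_hz_timer(regs) runs once an interrupt
 * brings it back, roughly
 *
 *	stop_hz_timer();
 *	safe_halt();
 *	...	later, from interrupt context:
 *	restart_hz_timer(regs);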
*/ #ifndef __ASM_MACH_IDLETIMER_H #define __ASM_MACH_IDLETIMER_H #include #include #if !defined(CONFIG_X86_VMI) # error invalid sub-arch include #endif #ifdef CONFIG_NO_IDLE_HZ extern DEFINE_PER_CPU(unsigned char, one_shot_mode); extern void vmi_stop_hz_timer(void); extern void vmi_account_time_restart_hz_timer(struct pt_regs *regs, int cpu); static inline void stop_hz_timer(void) { if (vmi_timer_used()) vmi_stop_hz_timer(); } static inline void restart_hz_timer(struct pt_regs *regs) { int cpu = smp_processor_id(); /* nohz_cpu_mask bit set also implies vmi_timer_used(). */ if (per_cpu(one_shot_mode, cpu)) vmi_account_time_restart_hz_timer(regs, cpu); } #else /* CONFIG_NO_IDLE_HZ */ static inline void stop_hz_timer(void) { } static inline void restart_hz_timer(struct pt_regs *regs) { } #endif /* CONFIG_NO_IDLE_HZ */ #endif /* __ASM_MACH_IDLETIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_io.h000066400000000000000000000101361314037446600261320ustar00rootroot00000000000000#ifndef _MACH_ASM_IO_H #include /* * Note on the VMI calls below; all of them must specify memory * clobber, even output calls. The reason is that the memory * clobber is not an effect of the VMI call, but is used to * serialize memory writes by the compiler before an I/O * instruction. In addition, even input operations may clobber * hardware mapped memory. */ static inline void vmi_outl(const u32 value, const u16 port) { vmi_wrap_call( OUTL, "out %0, %w1", VMI_NO_OUTPUT, 2, VMI_XCONC("a"(value), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void vmi_outb(const u8 value, const u16 port) { vmi_wrap_call( OUTB, "outb %b0, %w1", VMI_NO_OUTPUT, 2, VMI_XCONC("a"(value), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void vmi_outw(const u16 value, const u16 port) { vmi_wrap_call( OUTW, "outw %w0, %w1", VMI_NO_OUTPUT, 2, VMI_XCONC("a"(value), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline u32 vmi_inl(const u16 port) { u32 ret; vmi_wrap_call( INL, "in %w0, %%eax", VMI_OREG1(ret), 1, VMI_XCONC("d"(port)), VMI_CLOBBER_EXTENDED(ONE_RETURN, "memory")); return ret; } static inline u8 vmi_inb(const u16 port) { u8 ret; vmi_wrap_call( INB, "inb %w0, %%al", VMI_OREG1(ret), 1, VMI_XCONC("d"(port)), VMI_CLOBBER_EXTENDED(ONE_RETURN, "memory")); return ret; } static inline u16 vmi_inw(const u16 port) { u16 ret; vmi_wrap_call( INW, "inw %w0, %%ax", VMI_OREG1(ret), 1, VMI_XCONC("d"(port)), VMI_CLOBBER_EXTENDED(ONE_RETURN, "memory")); return ret; } static inline void vmi_outsl(const void *addr, const u16 port, u32 count) { vmi_wrap_call( OUTSL, "rep; outsl", VMI_NO_OUTPUT, 3, VMI_XCONC("S"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "esi", "ecx", "memory")); } static inline void vmi_outsb(const void *addr, const u16 port, u32 count) { vmi_wrap_call( OUTSB, "rep; outsb", VMI_NO_OUTPUT, 3, VMI_XCONC("S"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "esi", "ecx", "memory")); } static inline void vmi_outsw(const void *addr, const u16 port, u32 count) { vmi_wrap_call( OUTSW, "rep; outsw", VMI_NO_OUTPUT, 3, VMI_XCONC("S"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "esi", "ecx", "memory")); } static inline void vmi_insl(const void *addr, const u16 port, u32 count) { vmi_wrap_call( INSL, "rep; insl", VMI_NO_OUTPUT, 3, VMI_XCONC("D"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "edi", "ecx", "memory")); } static inline void vmi_insb(const void *addr, 
const u16 port, u32 count) { vmi_wrap_call( INSB, "rep; insb", VMI_NO_OUTPUT, 3, VMI_XCONC("D"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "edi", "ecx", "memory")); } static inline void vmi_insw(const void *addr, const u16 port, u32 count) { vmi_wrap_call( INSW, "rep; insw", VMI_NO_OUTPUT, 3, VMI_XCONC("D"(addr), "c"(count), "d"(port)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "edi", "ecx", "memory")); } /* * Allow I/O hypercalls to be turned off for debugging; trapping port I/O is * easier to log and rules out a potential source of errors during development. */ #ifdef CONFIG_VMI_IO_HYPERCALLS #define __BUILDOUTINST(bwl,bw,value,port) \ vmi_out##bwl(value, port) #define __BUILDININST(bwl,bw,value,port) \ do { value = vmi_in##bwl(port); } while (0) #define __BUILDOUTSINST(bwl,addr,count,port) \ vmi_outs##bwl(addr, port, count) #define __BUILDINSINST(bwl,addr,count,port) \ vmi_ins##bwl(addr, port, count) #else /* !CONFIG_VMI_IO_HYPERCALLS */ #include <../mach-default/mach_io.h> #undef __SLOW_DOWN_IO #endif /* * Slow down port I/O by issuing a write to a chipset scratch * register. This is an easy way to slow down I/O regardless * of processor speed, but useless in a virtual machine. */ static inline void vmi_iodelay(void) { vmi_wrap_call( IODelay, "outb %%al, $0x80", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_CLOBBER(ZERO_RETURNS)); } #define __SLOW_DOWN_IO vmi_iodelay() #define _MACH_ASM_IO_H /* At the end, so we can pull in native fallbacks */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_module.h000066400000000000000000000003511314037446600270060ustar00rootroot00000000000000#ifndef _ASM_I386_ARCH_MODULE_H #define _ASM_I386_ARCH_MODULE_H #include #define MODULE_SUBARCH_VERMAGIC "VMI " XSTR(VMI_API_REV_MAJOR) "." XSTR(VMI_API_REV_MINOR) " " #endif /* _ASM_I386_ARCH_MODULE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_pgalloc.h000066400000000000000000000104541314037446600271470ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #ifndef _MACH_PGALLOC_H #define _MACH_PGALLOC_H #include static inline void vmi_allocate_page(u32 pfn, u32 flags, u32 clone_pfn, u32 base, u32 count) { vmi_wrap_call( AllocatePage, "", VMI_NO_OUTPUT, 5, VMI_XCONC(VMI_IREG1(pfn), VMI_IREG2(flags), VMI_IREG3(clone_pfn), VMI_IREG4(base), VMI_IREG5(count)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void vmi_release_page(u32 pfn, u32 flags) { vmi_wrap_call( ReleasePage, "", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(pfn), VMI_IREG2(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } #ifndef CONFIG_X86_PAE #define mach_setup_pte(va, pfn) \ do { \ vmi_allocate_page(pfn, VMI_PAGE_ZEROED | \ VMI_PAGE_PT, 0, 0, 0); \ } while (0) #define mach_release_pte(va, pfn) \ do { \ vmi_release_page(pfn, VMI_PAGE_PT); \ } while (0) #define mach_setup_pmd(va, pfn) #define mach_release_pmd(va, pfn) #define vmi_setup_pgd(va, pfn, root, base, count) \ do { \ vmi_allocate_page(pfn, VMI_PAGE_ZEROED | VMI_PAGE_CLONE | \ VMI_PAGE_PD, root, base, count); \ } while (0) #define vmi_release_pgd(va, pfn) \ do { \ vmi_release_page(pfn, VMI_PAGE_PD); \ } while (0) #else /* CONFIG_X86_PAE */ #define mach_setup_pte(va, pfn) \ do { \ vmi_allocate_page(pfn, VMI_PAGE_ZEROED | VMI_PAGE_PAE | \ VMI_PAGE_PT, 0, 0, 0); \ } while (0) #define mach_release_pte(va, pfn) \ do { \ vmi_release_page(pfn, VMI_PAGE_PAE | VMI_PAGE_PT); \ } while (0) #define mach_setup_pmd(va, pfn) \ do { \ vmi_allocate_page(pfn, VMI_PAGE_ZEROED | VMI_PAGE_PAE | \ VMI_PAGE_PD, 0, 0, 0); \ } while (0) #define mach_release_pmd(va, pfn) \ do { \ vmi_release_page(pfn, VMI_PAGE_PAE|VMI_PAGE_PD); \ } while (0) #define vmi_setup_pgd(va, pfn, root, base, count) \ vmi_allocate_page(pfn, VMI_PAGE_PDP, 0, 0, 0) #define vmi_release_pgd(va, pfn) \ vmi_release_page(pfn, VMI_PAGE_PDP) #endif #define mach_setup_pgd(va, pfn, root, base, count) \ do { \ vmi_setup_pgd(va, pfn, root, base, count); \ } while (0) #define mach_release_pgd(va, pfn) \ do { \ vmi_release_pgd(va, pfn); \ } while (0) static inline void vmi_set_linear_mapping(const int slot, const u32 va, const u32 pages, const u32 pfn) { vmi_wrap_call( SetLinearMapping, "", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(slot), VMI_IREG2(va), VMI_IREG3(pages), VMI_IREG4(pfn)), VMI_CLOBBER(ZERO_RETURNS)); } #define mach_map_linear_pt(num, ptep, pfn) \ vmi_set_linear_mapping(num+1, (u32)ptep, 1, pfn) #define mach_map_linear_ldt(ldt, pfn) \ vmi_set_linear_mapping(3, (u32)ldt, 1, pfn) #define mach_map_linear_range(start, pages, pfn) \ vmi_set_linear_mapping(0, start, pages, pfn) #endif /* _MACH_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_pgtable.h000066400000000000000000000120361314037446600271420ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #ifndef _MACH_PGTABLE_H #define _MACH_PGTABLE_H #include #define __HAVE_SUBARCH_PTE_WRITE_FUNCTIONS #define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \ (!mustbeuser && (mm) == &init_mm)) #define vmi_flags_addr(mm, addr, level, user) \ ((level) | (is_current_as(mm, user) ? \ (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) #define vmi_flags_addr_defer(mm, addr, level, user) \ ((level) | (is_current_as(mm, user) ? \ (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) /* * Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben */ #define set_pte(ptep, pte) \ vmi_set_pxe(pte, ptep, VMI_PAGE_PT) /* * Rules for using set_pte_at: the pte may be a kernel or user * page table entry, and must be not present or not mapped. Use * this in places where the mm and address information has been * preserved. */ #define set_pte_at(mm, addr, ptep, pte) \ vmi_set_pxe(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)) /* * Rules for using set_pte_at_defer: the pte may be a kernel or * user page table entry, and must be not present or a private * map. Use this in places where the mm and address information * has been preserved and the page update is followed by an * immediate flush. */ #define set_pte_at_defer(mm, addr, ptep, pte) \ vmi_set_pxe(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)) #ifdef CONFIG_X86_PAE # include #else # include #endif /* * Rules for using ptep_establish: the pte MUST be a user pte, and must be a * present->present transition. */ #define __HAVE_ARCH_PTEP_ESTABLISH #define ptep_establish(__vma, __address, __ptep, __entry) \ do { \ vmi_set_pxe_present(__entry, __ptep, \ vmi_flags_addr_defer((__vma)->vm_mm, \ (__address), VMI_PAGE_PT, 1)); \ flush_tlb_page(__vma, __address); \ } while (0) /* * Rules for using ptep_set_access_flags: the pte MUST be a user PTE, and * must only modify accessed / dirty bits. 
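 *
 * Illustrative caller (an assumption about generic mm code, not text
 * from this header):
 *
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	ptep_set_access_flags(vma, address, ptep, entry, 1);
 *
 * i.e. only the accessed/dirty bits of an already-present user pte
 * change, and the macro below defers the shadow update until the
 * flush_tlb_page() it issues.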
*/ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ if (__dirty) { \ vmi_set_pxe(__entry, __ptep, \ vmi_flags_addr_defer((__vma)->vm_mm, \ (__address), VMI_PAGE_PT, 1)); \ flush_tlb_page(__vma, __address); \ } \ } while (0) #define pte_clear(__mm, __address, __ptep) \ vmi_clear_pxe((__ptep), vmi_flags_addr((__mm), (__address), \ VMI_PAGE_PT, 0)) #define pmd_clear(__pmdptr) \ vmi_clear_pxe((pte_t *)(__pmdptr), VMI_PAGE_PD) #define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL #define pte_clear_not_present_full(__mm, __address, __ptep, __full) \ do { \ _pte_clear(__ptep); \ } while (0) static inline void vmi_set_lazy_mode(u32 lazy_mode) { vmi_wrap_call( SetLazyMode, "", VMI_NO_OUTPUT, 1, VMI_IREG1(lazy_mode), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } #define __HAVE_ARCH_ENTER_LAZY_CPU_MODE static inline void arch_enter_lazy_cpu_mode(void) { vmi_set_lazy_mode(VMI_LAZY_CPU_STATE); } static inline void arch_leave_lazy_cpu_mode(void) { vmi_set_lazy_mode(VMI_LAZY_MODE_OFF); } #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { vmi_set_lazy_mode(VMI_LAZY_MMU_UPDATES); } static inline void arch_leave_lazy_mmu_mode(void) { vmi_set_lazy_mode(VMI_LAZY_MODE_OFF); } /* * Rulese for using pte_update_hook - it must be called after any PTE * update which has not been done using the set_pte interfaces. Kernel * PTE updates should either be sets, clears, or set_pte_atomic for P->P * transitions, which means this hook should only be called for user PTEs. * This hook implies a P->P protection or access change has taken place, * which requires a subsequent TLB flush. This is why we allow the * notification to be deferred. */ #define pte_update_hook(mm, addr, ptep) \ vmi_notify_pte_update(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1)); #endif /* _PGTABLE_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_processor.h000066400000000000000000000062041314037446600275430ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #ifndef _MACH_PROCESSOR_H #define _MACH_PROCESSOR_H #include static inline void vmi_cpuid(const int op, int *eax, int *ebx, int *ecx, int *edx) { vmi_wrap_call( CPUID, "cpuid", VMI_XCONC("=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)), 1, "a" (op), VMI_CLOBBER(FOUR_RETURNS)); } /* * Generic CPUID function */ static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx) { vmi_cpuid(op, eax, ebx, ecx, edx); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { vmi_wrap_call( CPUID, "cpuid", VMI_XCONC("=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)), 2, VMI_XCONC("a" (op), "c" (count)), VMI_CLOBBER(FOUR_RETURNS)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax, ebx, ecx, edx; vmi_cpuid(op, &eax, &ebx, &ecx, &edx); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx, ecx, edx; vmi_cpuid(op, &eax, &ebx, &ecx, &edx); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ebx, ecx, edx; vmi_cpuid(op, &eax, &ebx, &ecx, &edx); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, ebx, ecx, edx; vmi_cpuid(op, &eax, &ebx, &ecx, &edx); return edx; } static inline void arch_update_kernel_stack(unsigned sel, u32 stack) { vmi_wrap_call( UpdateKernelStack, "", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(sel), VMI_IREG2(stack)), VMI_CLOBBER(ZERO_RETURNS)); } static inline void set_debugreg(const u32 val, const int num) { vmi_wrap_call( SetDR, "movl %1, %%db%c2", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(num), VMI_IREG2(val), VMI_IMM (num)), VMI_CLOBBER(ZERO_RETURNS)); } static inline u32 vmi_get_dr(const int num) { u32 ret; vmi_wrap_call( GetDR, "movl %%db%c1, %%eax", VMI_OREG1(ret), 1, VMI_XCONC(VMI_IREG1(num), VMI_IMM (num)), VMI_CLOBBER(ONE_RETURN)); return ret; } #define get_debugreg(var, register) do { var = vmi_get_dr(register); } while (0) static inline void set_iopl_mask(u32 mask) { vmi_wrap_call( SetIOPLMask, "pushfl;" "andl $0xffffcfff, (%%esp);" "orl %0, (%%esp);" "popfl", VMI_NO_OUTPUT, 1, VMI_IREG1 (mask), VMI_CLOBBER(ZERO_RETURNS)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_schedclock.h000066400000000000000000000022171314037446600276260ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to dhecht@vmware.com * */ /* * Machine specific sched_clock routines. 
*/ #ifndef __ASM_MACH_SCHEDCLOCK_H #define __ASM_MACH_SCHEDCLOCK_H #include #if !defined(CONFIG_X86_VMI) # error invalid sub-arch include #endif static inline unsigned long long sched_clock_cycles(void) { return vmi_get_available_cycles(); } #endif /* __ASM_MACH_SCHEDCLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_segment.h000066400000000000000000000026441314037446600271720ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef __MACH_SEGMENT_H #define __MACH_SEGMENT_H #if !defined(CONFIG_X86_VMI) # error invalid sub-arch include #endif #ifndef __ASSEMBLY__ static inline unsigned get_kernel_rpl(void) { unsigned cs; __asm__ ("movl %%cs,%0" : "=r"(cs):); return cs & SEGMENT_RPL_MASK; } #endif #define COMPARE_SEGMENT_STACK(segment, offset) \ pushl %eax; \ mov offset+4(%esp), %eax; \ andl $~SEGMENT_RPL_MASK, %eax; \ cmpw $segment,%ax; \ popl %eax; #define COMPARE_SEGMENT_REG(segment, reg) \ pushl %eax; \ mov reg, %eax; \ andl $~SEGMENT_RPL_MASK, %eax; \ cmpw $segment,%ax; \ popl %eax; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_system.h000066400000000000000000000106541314037446600270540ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #ifndef _MACH_SYSTEM_H #define _MACH_SYSTEM_H #include static inline void write_cr0(const u32 val) { vmi_wrap_call( SetCR0, "mov %0, %%cr0", VMI_NO_OUTPUT, 1, VMI_IREG1(val), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void write_cr2(const u32 val) { vmi_wrap_call( SetCR2, "mov %0, %%cr2", VMI_NO_OUTPUT, 1, VMI_IREG1(val), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void write_cr3(const u32 val) { vmi_wrap_call( SetCR3, "mov %0, %%cr3", VMI_NO_OUTPUT, 1, VMI_IREG1(val), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void write_cr4(const u32 val) { vmi_wrap_call( SetCR4, "mov %0, %%cr4", VMI_NO_OUTPUT, 1, VMI_IREG1(val), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline u32 read_cr0(void) { u32 ret; vmi_wrap_call( GetCR0, "mov %%cr0, %%eax", VMI_OREG1(ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } static inline u32 read_cr2(void) { u32 ret; vmi_wrap_call( GetCR2, "mov %%cr2, %%eax", VMI_OREG1(ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } static inline u32 read_cr3(void) { u32 ret; vmi_wrap_call( GetCR3, "mov %%cr3, %%eax", VMI_OREG1(ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } static inline u32 read_cr4(void) { u32 ret; vmi_wrap_call( GetCR4, "mov %%cr4, %%eax", VMI_OREG1(ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } #define read_cr4_safe() read_cr4() #define load_cr3(pgdir) write_cr3(__pa(pgdir)) static inline void clts(void) { vmi_wrap_call( CLTS, "clts", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_CLOBBER(ZERO_RETURNS)); } static inline void wbinvd(void) { vmi_wrap_call( WBINVD, "wbinvd", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } /* * For EnableInterrupts, DisableInterrupts, GetInterruptMask, SetInterruptMask, * only flags are clobbered by these calls, since they have assembler call * convention. We can get better C code by indicating only "cc" clobber. * Both setting and disabling interrupts must use memory clobber as well, to * prevent GCC from reordering memory access around them. 
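 *
 * Illustrative example (not part of the original comment): the "memory"
 * clobber acts as a compiler barrier, so in
 *
 *	local_irq_disable();
 *	shared_counter++;
 *	local_irq_enable();
 *
 * the update of shared_counter can be neither hoisted above the disable
 * nor sunk below the enable by the compiler.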
*/ static inline void local_irq_disable(void) { vmi_wrap_call( DisableInterrupts, "cli", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_XCONC("cc", "memory")); } static inline void local_irq_enable(void) { vmi_wrap_call( EnableInterrupts, "sti", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_XCONC("cc", "memory")); } static inline void local_irq_restore(const unsigned long flags) { vmi_wrap_call( SetInterruptMask, "pushl %0; popfl", VMI_NO_OUTPUT, 1, VMI_IREG1 (flags), VMI_XCONC("cc", "memory")); } static inline unsigned long vmi_get_flags(void) { unsigned long ret; vmi_wrap_call( GetInterruptMask, "pushfl; popl %%eax", VMI_OREG1 (ret), 0, VMI_NO_INPUT, "cc"); return ret; } #define local_save_flags(x) do { typecheck(unsigned long,x); (x) = vmi_get_flags(); } while (0) static inline void vmi_reboot(int how) { vmi_wrap_call( Reboot, "", VMI_NO_OUTPUT, 1, VMI_IREG1(how), "memory"); /* only memory clobber for better code */ } static inline void safe_halt(void) { vmi_wrap_call( Halt, "sti; hlt", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_CLOBBER(ZERO_RETURNS)); } /* By default, halt is assumed safe, but we can drop the sti */ static inline void halt(void) { vmi_wrap_call( Halt, "hlt", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, VMI_CLOBBER(ZERO_RETURNS)); } static inline void shutdown_halt(void) { vmi_wrap_call( Shutdown, "cli; hlt", VMI_NO_OUTPUT, 0, VMI_NO_INPUT, "memory"); /* only memory clobber for better code */ } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/mach_tlbflush.h000066400000000000000000000031551314037446600273510ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef _MACH_TLBFLUSH_H #define _MACH_TLBFLUSH_H #include static inline void __flush_tlb(void) { vmi_wrap_call( FlushTLB, "mov %%cr3, %%eax; mov %%eax, %%cr3", VMI_NO_OUTPUT, 1, VMI_IREG1(VMI_FLUSH_TLB), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); } static inline void __flush_tlb_global(void) { vmi_wrap_call( FlushTLB, "mov %%cr4, %%eax; \n" "andb $0x7f, %%al; \n" "mov %%eax, %%cr4; \n" "orb $0x80, %%al; \n" "mov %%eax, %%cr4", VMI_NO_OUTPUT, 1, VMI_IREG1(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory")); } static inline void __flush_tlb_single(u32 va) { vmi_wrap_call( InvalPage, "invlpg (%0)", VMI_NO_OUTPUT, 1, VMI_IREG1(va), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } #endif /* _MACH_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/paravirtualInterface.h000066400000000000000000000111131314037446600307020ustar00rootroot00000000000000/* * paravirtualInterface.h -- * * Header file for paravirtualization interface and definitions * for the hypervisor option ROM tables. * * Copyright (C) 2005, VMware, Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef _PARAVIRTUAL_INTERFACE_H_ #define _PARAVIRTUAL_INTERFACE_H_ #include "vmiCalls.h" /* *--------------------------------------------------------------------- * * VMI Option ROM API * *--------------------------------------------------------------------- */ #define VMI_SIGNATURE 0x696d5663 /* "cVmi" */ #define VMI_API_REV_MAJOR 3 /* The minor number we require for compatibility */ #define VMI_API_REV_MINOR 0 /* VMI Relocation types */ #define VMI_RELOCATION_NONE 0 #define VMI_RELOCATION_CALL_REL 1 #define VMI_RELOCATION_JUMP_REL 2 #ifndef __ASSEMBLY__ struct vmi_relocation_info { unsigned long eip; unsigned char type; unsigned char reserved[3]; }; #endif /* Flags used by VMI_Reboot call */ #define VMI_REBOOT_SOFT 0x0 #define VMI_REBOOT_HARD 0x1 /* Flags used by VMI_{Allocate|Release}Page call */ #define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */ #define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */ #define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */ /* Flags shared by Allocate|Release Page and PTE updates */ #define VMI_PAGE_PT 0x01 #define VMI_PAGE_PD 0x02 #define VMI_PAGE_PDP 0x04 #define VMI_PAGE_PML4 0x08 /* Flags used by PTE updates */ #define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */ #define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */ #define VMI_PAGE_VA_MASK 0xfffff000 /* Flags used by VMI_FlushTLB call */ #define VMI_FLUSH_TLB 0x01 #define VMI_FLUSH_GLOBAL 0x02 /* Flags used by VMI_SetLazyMode */ #define VMI_LAZY_MODE_OFF 0x00 #define VMI_LAZY_MMU_UPDATES 0x01 #define VMI_LAZY_CPU_STATE 0x02 /* The number of VMI address translation slot */ #define VMI_LINEAR_MAP_SLOTS 4 /* The cycle counters. */ #define VMI_CYCLES_REAL 0 #define VMI_CYCLES_AVAILABLE 1 #define VMI_CYCLES_STOLEN 2 /* The alarm interface 'flags' bits. 
[TBD: exact format of 'flags'] */ #define VMI_ALARM_COUNTERS 2 #define VMI_ALARM_COUNTER_MASK 0x000000ff #define VMI_ALARM_WIRED_IRQ0 0x00000000 #define VMI_ALARM_WIRED_LVTT 0x00010000 #define VMI_ALARM_IS_ONESHOT 0x00000000 #define VMI_ALARM_IS_PERIODIC 0x00000100 /* *--------------------------------------------------------------------- * * Generic VROM structures and definitions * *--------------------------------------------------------------------- */ #ifndef __ASSEMBLY__ struct vrom_header { u16 rom_signature; // option ROM signature u8 rom_length; // ROM length in 512 byte chunks u8 rom_entry[4]; // 16-bit code entry point u8 rom_pad0; // 4-byte align pad u32 vrom_signature; // VROM identification signature u8 api_version_min;// Minor version of API u8 api_version_maj;// Major version of API u8 jump_slots; // Number of jump slots u8 reserved1; // Reserved for expansion u32 reserved2; // Reserved for expansion u32 reserved3; // Reserved for private use u16 pci_header_offs;// Offset to PCI OPROM header u16 pnp_header_offs;// Offset to PnP OPROM header u32 rom_pad3; // PnP reserverd / VMI reserved u8 reserved[96]; // Reserved for headers char vmi_init[8]; // VMI_Init jump point char get_reloc[8]; // VMI_GetRelocationInfo jump point }; /* State needed to start an application processor in an SMP system. */ struct vmi_ap_state { u32 cr0; u32 cr2; u32 cr3; u32 cr4; u64 efer; u32 eip; u32 eflags; u32 eax; u32 ebx; u32 ecx; u32 edx; u32 esp; u32 ebp; u32 esi; u32 edi; u16 cs; u16 ss; u16 ds; u16 es; u16 fs; u16 gs; u16 ldtr; u16 gdtr_limit; u32 gdtr_base; u32 idtr_base; u16 idtr_limit; }; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/pgtable-2level-ops.h000066400000000000000000000034031314037446600301360ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef _MACH_PGTABLE_LEVEL_OPS_H #define _MACH_PGTABLE_LEVEL_OPS_H static inline void vmi_set_pxe(pte_t pteval, pte_t *ptep, u32 flags) { vmi_wrap_call( SetPxE, "movl %0, (%1)", VMI_NO_OUTPUT, 3, VMI_XCONC(VMI_IREG1(pteval), VMI_IREG2(ptep), VMI_IREG3(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } #define vmi_set_pxe_present(pteval, ptep, flags) vmi_set_pxe((pteval), (ptep), (flags)) #define vmi_clear_pxe(ptep, flags) vmi_set_pxe((pte_t) {0}, (ptep), (flags)) /* * (pmds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
*/ #define set_pmd(pmdptr, pmdval) \ vmi_set_pxe((pte_t) {(pmdval).pud.pgd.pgd}, (pte_t *) pmdptr, VMI_PAGE_PD) static inline void vmi_notify_pte_update(pte_t *ptep, u32 flags) { vmi_wrap_call( UpdatePxE, "", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(ptep), VMI_IREG2(flags)), VMI_CLOBBER(ZERO_RETURNS)); } #endif /* _MACH_PGTABLE_LEVEL_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/pgtable-3level-ops.h000066400000000000000000000051061314037446600301410ustar00rootroot00000000000000#ifndef _MACH_PGTABLE_LEVEL_OPS_H #define _MACH_PGTABLE_LEVEL_OPS_H /* * CPUs that support out of order store must provide a write memory barrier * to ensure that the TLB does not prefetch an entry with only the low word * written. We are guaranteed that the TLB can not prefetch a partially * written entry when writing the high word first, since by calling * convention, set_pte can only be called when the entry is either not * present, or not accessible by any TLB. */ #ifdef CONFIG_X86_OOSTORE #define PAE_PTE_WMB "lock; addl $0,0(%%esp);" #else #define PAE_PTE_WMB #endif static inline void vmi_set_pxe(pte_t pteval, pte_t *ptep, u32 flags) { vmi_wrap_call( SetPxELong, "movl %1, 4(%2);" PAE_PTE_WMB "movl %0, (%2);", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(pteval.pte_low), VMI_IREG2(pteval.pte_high), VMI_IREG3(ptep), VMI_IREG4(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } /* * Since this is only called on user PTEs, and the page fault handler * must handle the already racy situation of simultaneous page faults, * we are justified in merely clearing the PTE, followed by a set. * The ordering here is important. */ static inline void vmi_set_pxe_present(pte_t pte, pte_t *ptep, u32 flags) { vmi_wrap_call( SetPxELong, "movl $0, (%2);" PAE_PTE_WMB "movl %1, 4(%2);" PAE_PTE_WMB "movl %0, (%2);", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(pte.pte_low), VMI_IREG2(pte.pte_high), VMI_IREG3(ptep), VMI_IREG4(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory", "ebx", "edi", "eax", "edx")); } static inline void vmi_clear_pxe(pte_t *ptep, u32 flags) { vmi_wrap_call( SetPxELong, "movl %0, (%2);" PAE_PTE_WMB "movl %1, 4(%2);", VMI_NO_OUTPUT, 4, VMI_XCONC(VMI_IREG1(0), VMI_IREG2(0), VMI_IREG3(ptep), VMI_IREG4(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void vmi_notify_pte_update(pte_t *ptep, u32 flags) { vmi_wrap_call( UpdatePxELong, "", VMI_NO_OUTPUT, 2, VMI_XCONC(VMI_IREG1(ptep), VMI_IREG2(flags)), VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "memory")); } static inline void vmi_set_pxe_atomic(pte_t *ptep, unsigned long long pteval) { set_64bit((unsigned long long *)ptep, pteval); vmi_notify_pte_update(ptep, VMI_PAGE_PT); } /* Called only for kernel PTEs and PMDs (set_pmd_pte) */ #define set_pte_atomic(ptep, pte) \ vmi_set_pxe_atomic(ptep, pte_val(pte)) static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval) { vmi_set_pxe_atomic((pte_t *)pmdptr, pmd_val(pmdval)); } #define set_pud(pudptr,pudval) \ (*(pudptr) = (pudval)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/vmi.h000066400000000000000000000075501314037446600253340ustar00rootroot00000000000000/* * Copyright (C) 2005, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to zach@vmware.com * */ #ifndef __MACH_VMI_H #define __MACH_VMI_H /* Linux type system definitions */ #include #include #include /* * We must know the number of return values to avoid clobbering * one of EAX, EDX, which are used for 32-bit and 64-bit returns. */ #define VMI_CLOBBER_ZERO_RETURNS "cc", "eax", "edx", "ecx" #define VMI_CLOBBER_ONE_RETURN "cc", "edx", "ecx" #define VMI_CLOBBER_TWO_RETURNS "cc", "ecx" #define VMI_CLOBBER_FOUR_RETURNS "cc" #define VMI_CLOBBER(saved) VMI_XCONC(VMI_CLOBBER_##saved) #define VMI_CLOBBER_EXTENDED(saved, extras...) VMI_XCONC(VMI_CLOBBER_##saved, extras) /* Output register declarations */ #define VMI_OREG1 "=a" #define VMI_OREG2 "=d" #define VMI_OREG64 "=A" /* Input register declarations */ #define VMI_IREG1 "a" #define VMI_IREG2 "d" #define VMI_IREG3 "c" #define VMI_IREG4 "ir" #define VMI_IREG5 "ir" /* * Some native instructions (debug register access) need immediate encodings, * which can always be satisfied; surpress warnings in GCC 3.X by using 'V' as * a potential constraint. */ #if (__GNUC__ == 4) #define VMI_IMM "i" #else #define VMI_IMM "iV" #endif /* * vmi_preambleX encodes the pushing of stack arguments; * Note the first three parameters are passed with * registers, thus there is no push. */ #define vmi_preamble0 #define vmi_preamble1 #define vmi_preamble2 #define vmi_preamble3 #define vmi_preamble4 "push %3;" #define vmi_preamble5 "push %4; push %3;" #define vmi_preamble(num_inputs) vmi_preamble##num_inputs /* * vmi_postambleX encodes the after call stack fixup */ #define vmi_postamble0 #define vmi_postamble1 #define vmi_postamble2 #define vmi_postamble3 #define vmi_postamble4 "lea 4(%%esp), %%esp" #define vmi_postamble5 "lea 8(%%esp), %%esp" #define vmi_postamble(num_inputs) vmi_postamble##num_inputs #define vmi_call_text(num_inputs) \ vmi_preamble(num_inputs) "\n\t" \ XCSTR(vmi_callout) "\n\t" \ vmi_postamble(num_inputs) "\n\t" \ /* * VMI inline assembly with input, output, and clobbers. * Multiple inputs and output must be wrapped using VMI_XCONC(...) 
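 *
 * For illustration, the pattern used throughout these headers, e.g.
 * read_cr0() in mach_system.h:
 *
 *	vmi_wrap_call(
 *		GetCR0, "mov %%cr0, %%eax",
 *		VMI_OREG1(ret),
 *		0, VMI_NO_INPUT,
 *		VMI_CLOBBER(ONE_RETURN));
 *
 * which emits the native instruction, the out-of-line call translation,
 * a nop-padded copy of the native code, and one annotation record that
 * ties the three together for later patching.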
*/ #define vmi_call_lowlevel(call, native, alternative, output, input, clobber) \ do { \ asm volatile (XCSTR(vmi_native_start) "\n\t" \ native "\n\t" \ XCSTR(vmi_native_finish) "\n\t" \ \ XCSTR(vmi_translation_start) "\n\t" \ alternative "\n\t" \ XCSTR(vmi_translation_finish) "\n\t" \ \ XCSTR(vmi_padded_start) "\n\t" \ native "\n\t" \ XCSTR(vmi_nop_pad) "\n\t" \ XCSTR(vmi_padded_finish) "\n\t" \ \ :: input ); \ asm volatile (XCSTR(vmi_annotate(%c0)) :: "i"(VMI_CALL_##call)); \ asm volatile ( "" : output :: clobber ); \ } while (0) #define vmi_wrap_call(call, native, output, num_inputs, input, clobber) \ vmi_call_lowlevel(call, native, vmi_call_text(num_inputs), \ VMI_XCONC(output), VMI_XCONC(input), VMI_XCONC(clobber)) #define VMI_NO_INPUT #define VMI_NO_OUTPUT extern int hypervisor_found; #endif /* __MACH_VMI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/vmiCalls.h000066400000000000000000000051441314037446600263100ustar00rootroot00000000000000/* * vmiCalls.h -- * * List of 32-bit VMI interface calls and parameters * * Copyright (C) 2005, VMware, Inc. * * If a new VMI call is added at the end of the list, the VMI API minor * revision must be incremented. No calls can be deleted or rearranged * without incrementing the major revision number. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to zach@vmware.com * */ #define VMI_CALLS \ VDEF(CPUID) \ VDEF(WRMSR) \ VDEF(RDMSR) \ VDEF(SetGDT) \ VDEF(SetLDT) \ VDEF(SetIDT) \ VDEF(SetTR) \ VDEF(GetGDT) \ VDEF(GetLDT) \ VDEF(GetIDT) \ VDEF(GetTR) \ VDEF(WriteGDTEntry) \ VDEF(WriteLDTEntry) \ VDEF(WriteIDTEntry) \ VDEF(UpdateKernelStack) \ VDEF(SetCR0) \ VDEF(SetCR2) \ VDEF(SetCR3) \ VDEF(SetCR4) \ VDEF(GetCR0) \ VDEF(GetCR2) \ VDEF(GetCR3) \ VDEF(GetCR4) \ VDEF(WBINVD) \ VDEF(SetDR) \ VDEF(GetDR) \ VDEF(RDPMC) \ VDEF(RDTSC) \ VDEF(CLTS) \ VDEF(EnableInterrupts) \ VDEF(DisableInterrupts) \ VDEF(GetInterruptMask) \ VDEF(SetInterruptMask) \ VDEF(IRET) \ VDEF(SYSEXIT) \ VDEF(Halt) \ VDEF(Reboot) \ VDEF(Shutdown) \ VDEF(SetPxE) \ VDEF(SetPxELong) \ VDEF(UpdatePxE) \ VDEF(UpdatePxELong) \ VDEF(MachineToPhysical) \ VDEF(PhysicalToMachine) \ VDEF(AllocatePage) \ VDEF(ReleasePage) \ VDEF(InvalPage) \ VDEF(FlushTLB) \ VDEF(SetLinearMapping) \ VDEF(INL) \ VDEF(INB) \ VDEF(INW) \ VDEF(INSL) \ VDEF(INSB) \ VDEF(INSW) \ VDEF(OUTL) \ VDEF(OUTB) \ VDEF(OUTW) \ VDEF(OUTSL) \ VDEF(OUTSB) \ VDEF(OUTSW) \ VDEF(SetIOPLMask) \ VDEF(SetInitialAPState) \ VDEF(APICWrite) \ VDEF(APICRead) \ VDEF(IODelay) \ VDEF(GetCycleFrequency) \ VDEF(GetCycleCounter) \ VDEF(SetAlarm) \ VDEF(CancelAlarm) \ VDEF(GetWallclockTime) \ VDEF(WallclockUpdated) \ VDEF(SetCallbacks) \ VDEF(SetLazyMode) UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-vmi/vmi_time.h000066400000000000000000000072611314037446600263510ustar00rootroot00000000000000/* * VMI Time wrappers * * Copyright (C) 2006, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to dhecht@vmware.com * */ #ifndef __MACH_VMI_TIME_H #define __MACH_VMI_TIME_H #include "vmi.h" extern int hypervisor_timer_found; extern void probe_vmi_timer(void); static inline int vmi_timer_used(void) { return hypervisor_timer_found; } /* * When run under a hypervisor, a vcpu is always in one of three states: * running, halted, or ready. The vcpu is in the 'running' state if it * is executing. When the vcpu executes the halt interface, the vcpu * enters the 'halted' state and remains halted until there is some work * pending for the vcpu (e.g. an alarm expires, host I/O completes on * behalf of virtual I/O). At this point, the vcpu enters the 'ready' * state (waiting for the hypervisor to reschedule it). Finally, at any * time when the vcpu is not in the 'running' state nor the 'halted' * state, it is in the 'ready' state. * * Real time is advances while the vcpu is 'running', 'ready', or * 'halted'. Stolen time is the time in which the vcpu is in the * 'ready' state. Available time is the remaining time -- the vcpu is * either 'running' or 'halted'. * * All three views of time are accessible through the VMI cycle * counters. 
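 *
 * A minimal usage sketch with the accessors below (illustrative only;
 * converting cycles to time also needs vmi_get_cycle_frequency()):
 *
 *	u64 stolen_start = vmi_get_stolen_cycles();
 *	... vcpu runs, blocks, gets rescheduled ...
 *	u64 stolen = vmi_get_stolen_cycles() - stolen_start;
 *
 * Because available time is real time minus stolen time, the same
 * interval measured via vmi_get_real_cycles() and
 * vmi_get_available_cycles() should account for the difference.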
*/ static inline u64 vmi_get_cycle_frequency(void) { u64 ret; vmi_wrap_call( GetCycleFrequency, "xor %%eax, %%eax;" "xor %%edx, %%edx;", VMI_OREG64 (ret), 0, VMI_NO_INPUT, VMI_CLOBBER(TWO_RETURNS)); return ret; } static inline u64 vmi_get_real_cycles(void) { u64 ret; vmi_wrap_call( GetCycleCounter, "rdtsc", VMI_OREG64 (ret), 1, VMI_IREG1(VMI_CYCLES_REAL), VMI_CLOBBER(TWO_RETURNS)); return ret; } static inline u64 vmi_get_available_cycles(void) { u64 ret; vmi_wrap_call( GetCycleCounter, "rdtsc", VMI_OREG64 (ret), 1, VMI_IREG1(VMI_CYCLES_AVAILABLE), VMI_CLOBBER(TWO_RETURNS)); return ret; } static inline u64 vmi_get_stolen_cycles(void) { u64 ret; vmi_wrap_call( GetCycleCounter, "xor %%eax, %%eax;" "xor %%edx, %%edx;", VMI_OREG64 (ret), 1, VMI_IREG1(VMI_CYCLES_STOLEN), VMI_CLOBBER(TWO_RETURNS)); return ret; } static inline u64 vmi_get_wallclock(void) { u64 ret; vmi_wrap_call( GetWallclockTime, "xor %%eax, %%eax;" "xor %%edx, %%edx;", VMI_OREG64 (ret), 0, VMI_NO_INPUT, VMI_CLOBBER(TWO_RETURNS)); return ret; } static inline int vmi_wallclock_updated(void) { int ret; vmi_wrap_call( WallclockUpdated, "xor %%eax, %%eax;", VMI_OREG1 (ret), 0, VMI_NO_INPUT, VMI_CLOBBER(ONE_RETURN)); return ret; } static inline void vmi_set_alarm(u32 flags, u64 expiry, u64 period) { vmi_wrap_call( SetAlarm, "", VMI_NO_OUTPUT, 5, VMI_XCONC(VMI_IREG1(flags), VMI_IREG2((u32)expiry), VMI_IREG3((u32)(expiry >> 32)), VMI_IREG4((u32)period), VMI_IREG5((u32)(period >> 32))), VMI_CLOBBER(ZERO_RETURNS)); } static inline void vmi_cancel_alarm(u32 flags) { vmi_wrap_call( CancelAlarm, "", VMI_NO_OUTPUT, 1, VMI_IREG1(flags), VMI_CLOBBER(ONE_RETURN)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/000077500000000000000000000000001314037446600252425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/do_timer.h000066400000000000000000000010271314037446600272150ustar00rootroot00000000000000/* defines for inline arch setup functions */ #include static inline void do_timer_interrupt_hook(struct pt_regs *regs) { do_timer(regs); #ifndef CONFIG_SMP update_process_times(user_mode(regs)); #endif voyager_timer_interrupt(regs); } static inline int do_timer_overflow(int count) { /* can't read the ISR, just assume 1 tick overflow */ if(count > LATCH || count < 0) { printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH); count = LATCH; } count -= LATCH; return count; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/entry_arch.h000066400000000000000000000015741314037446600275600ustar00rootroot00000000000000/* -*- mode: c; c-basic-offset: 8 -*- */ /* Copyright (C) 2002 * * Author: James.Bottomley@HansenPartnership.com * * linux/arch/i386/voyager/entry_arch.h * * This file builds the VIC and QIC CPI gates */ /* initialise the voyager interrupt gates * * This uses the macros in irq.h to set up assembly jump gates. 
The * calls are then redirected to the same routine with smp_ prefixed */ BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT) BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT) BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0); /* do all the QIC interrupts */ BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI); BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/irq_vectors.h000066400000000000000000000044401314037446600277550ustar00rootroot00000000000000/* -*- mode: c; c-basic-offset: 8 -*- */ /* Copyright (C) 2002 * * Author: James.Bottomley@HansenPartnership.com * * linux/arch/i386/voyager/irq_vectors.h * * This file provides definitions for the VIC and QIC CPIs */ #ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ /* These define the CPIs we use in linux */ #define VIC_CPI_LEVEL0 0 #define VIC_CPI_LEVEL1 1 /* now the fake CPIs */ #define VIC_TIMER_CPI 2 #define VIC_INVALIDATE_CPI 3 #define VIC_RESCHEDULE_CPI 4 #define VIC_ENABLE_IRQ_CPI 5 #define VIC_CALL_FUNCTION_CPI 6 /* Now the QIC CPIs: Since we don't need the two initial levels, * these are 2 less than the VIC CPIs */ #define QIC_CPI_OFFSET 1 #define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) #define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) #define VIC_START_FAKE_CPI VIC_TIMER_CPI #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI /* this is the SYS_INT CPI. */ #define VIC_SYS_INT 8 #define VIC_CMN_INT 15 /* This is the boot CPI for alternate processors. It gets overwritten * by the above once the system has activated all available processors */ #define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) #define NR_VECTORS 256 #define NR_IRQS 224 #define NR_IRQ_VECTORS NR_IRQS #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) #ifndef __ASSEMBLY__ extern asmlinkage void vic_cpi_interrupt(void); extern asmlinkage void vic_sys_interrupt(void); extern asmlinkage void vic_cmn_interrupt(void); extern asmlinkage void qic_timer_interrupt(void); extern asmlinkage void qic_invalidate_interrupt(void); extern asmlinkage void qic_reschedule_interrupt(void); extern asmlinkage void qic_enable_irq_interrupt(void); extern asmlinkage void qic_call_function_interrupt(void); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/mach_hooks.h000066400000000000000000000017011314037446600275250ustar00rootroot00000000000000#ifndef _MACH_HOOKS_H #define _MACH_HOOKS_H /** * intr_init_hook - post gate setup interrupt initialisation * * Description: * Fill in any interrupts that may have been left out by the general * init_IRQ() routine. interrupts having to do with the machine rather * than the devices on the I/O bus (like APIC interrupts in intel MP * systems) are started here. 
**/ static inline void voyager_intr_init_hook(void) { #ifdef CONFIG_SMP smp_intr_init(); #endif legacy_intr_init_hook(); } #define intr_init_hook() voyager_intr_init_hook() /** * pre_setup_arch_hook - hook called prior to any setup_arch() execution * * Description: * generally used to activate any machine specific identification * routines that may be needed before setup_arch() runs. * Voyagers run their CPUs from independent clocks, so disable * the TSC code because we can't sync them **/ #define pre_setup_arch_hook() \ do { \ tsc_disable = 1; \ } while (0) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/setup_arch_post.h000066400000000000000000000036501314037446600306210ustar00rootroot00000000000000/* Hook for machine specific memory setup. * * This is included late in kernel/setup.c so that it can make use of all of * the static functions. */ static char * __init machine_specific_memory_setup(void) { char *who; who = "NOT VOYAGER"; if(voyager_level == 5) { __u32 addr, length; int i; who = "Voyager-SUS"; e820.nr_map = 0; for(i=0; voyager_memory_detect(i, &addr, &length); i++) { add_memory_region(addr, length, E820_RAM); } return who; } else if(voyager_level == 4) { __u32 tom; __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8; /* select the DINO config space */ outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT); /* Read DINO top of memory register */ tom = ((inb(catbase + 0x4) & 0xf0) << 16) + ((inb(catbase + 0x5) & 0x7f) << 24); if(inb(catbase) != VOYAGER_DINO) { printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n"); tom = (EXT_MEM_K)<<10; } who = "Voyager-TOM"; add_memory_region(0, 0x9f000, E820_RAM); /* map from 1M to top of memory */ add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM); /* FIXME: Should check the ASICs to see if I need to * take out the 8M window. Just do it at the moment * */ add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED); return who; } who = "BIOS-e820"; /* * Try to copy the BIOS-supplied E820-map. 
* * Otherwise fake a memory map; one section from 0k->640k, * the next section from 1mb->appropriate_mem_k */ sanitize_e820_map(E820_MAP, &E820_MAP_NR); if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) { unsigned long mem_size; /* compare results from other methods and take the greater */ if (ALT_MEM_K < EXT_MEM_K) { mem_size = EXT_MEM_K; who = "BIOS-88"; } else { mem_size = ALT_MEM_K; who = "BIOS-e801"; } e820.nr_map = 0; add_memory_region(0, LOWMEMSIZE(), E820_RAM); add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); } return who; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-voyager/setup_arch_pre.h000066400000000000000000000004351314037446600304200ustar00rootroot00000000000000#include #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *)(PARAM+0x40)) /* Hook to call BIOS initialisation function */ /* for voyager, pass the voyager BIOS/SUS info area to the detection * routines */ #define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/000077500000000000000000000000001314037446600243605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/000077500000000000000000000000001314037446600251405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/agp.h000066400000000000000000000033271314037446600260650ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include #include #include /* * Functions to keep the agpgart mappings coherent with the MMU. * The GART gives the CPU a physical alias of pages in memory. The alias region is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. This avoids * data corruption on some CPUs. */ /* Caller's responsibility to call global_flush_tlb() for * performance reasons */ #define map_page_into_agp(page) ( \ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) #define unmap_page_from_agp(page) ( \ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \ change_page_attr(page, 1, PAGE_KERNEL)) #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() wbinvd() /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) phys_to_machine(x) #define gart_to_phys(x) machine_to_phys(x) /* GATT allocation. Returns/accepts GATT kernel virtual address. 
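 *
 * Hedged usage sketch; the caller and its error handling are
 * hypothetical, only the two macros below are defined here:
 *
 *	char *gatt = alloc_gatt_pages(order);
 *	if (!gatt)
 *		return -ENOMEM;
 *	...
 *	free_gatt_pages(gatt, order);
 *
 * The backing memory comes from dma_alloc_coherent(), so it is already
 * suitable for device (bus-master) access.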
*/ #define alloc_gatt_pages(order) ({ \ char *_t; dma_addr_t _d; \ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \ _t; }) #define free_gatt_pages(table, order) \ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/desc.h000066400000000000000000000104351314037446600262320ustar00rootroot00000000000000#ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #define CPU_16BIT_STACK_SIZE 1024 #ifndef __ASSEMBLY__ #include #include #include extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); struct Xgt_desc_struct { unsigned short size; unsigned long address __attribute__((packed)); unsigned short pad; } __attribute__ ((packed)); extern struct Xgt_desc_struct idt_descr; DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; } #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr)) #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt)) #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr)) #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr)) #define store_tr(tr) __asm__ ("str %0":"=mr" (tr)) #define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt)) /* * This is the ldt that every process will get unless we need * something other than this. */ extern struct desc_struct default_ldt[]; extern void set_intr_gate(unsigned int irq, void * addr); #define _set_tssldt_desc(n,addr,limit,type) \ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ "movw %w1,2(%2)\n\t" \ "rorl $16,%1\n\t" \ "movb %b1,4(%2)\n\t" \ "movb %4,5(%2)\n\t" \ "movb $0,6(%2)\n\t" \ "movb %h1,7(%2)\n\t" \ "rorl $16,%1" \ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type)) #ifndef CONFIG_X86_NO_TSS static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr, offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); } #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) #endif static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) { _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); } #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 ) extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b); #if TLS_SIZE != 24 # error update this code. 
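/*
 * The guard above exists because load_TLS() below updates exactly three
 * descriptors, C(0); C(1); C(2), i.e. GDT_ENTRY_TLS_MIN through
 * GDT_ENTRY_TLS_MIN + 2, at 8 bytes each: 3 * 8 == 24. If TLS_SIZE ever
 * changes, the unrolled hypervisor updates must be changed to match.
 */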
#endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { #define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \ *(u64 *)&t->tls_array[i])) \ BUG(); C(0); C(1); C(2); #undef C } static inline void clear_LDT(void) { int cpu = get_cpu(); /* * NB. We load the default_ldt for lcall7/27 handling on demand, as * it slows down context switching. Noone uses it anyway. */ cpu = cpu; /* XXX avoid compiler warning */ xen_set_ldt(NULL, 0); put_cpu(); } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock(mm_context_t *pc, int cpu) { void *segments = pc->ldt; int count = pc->size; if (likely(!count)) segments = NULL; xen_set_ldt(segments, count); } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } static inline unsigned long get_desc_base(unsigned long *desc) { unsigned long base; base = ((desc[0] >> 16) & 0x0000ffff) | ((desc[1] << 16) & 0x00ff0000) | (desc[1] & 0xff000000); return base; } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/dma-mapping.h000066400000000000000000000103641314037446600275070ustar00rootroot00000000000000#ifndef _ASM_I386_DMA_MAPPING_H #define _ASM_I386_DMA_MAPPING_H /* * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for * documentation. */ #include #include #include #include #include static inline int address_needs_mapping(struct device *hwdev, dma_addr_t addr) { dma_addr_t mask = 0xffffffff; /* If the device has a mask, use it, otherwise default to 32 bits */ if (hwdev && hwdev->dma_mask) mask = *hwdev->dma_mask; return (addr & ~mask) != 0; } static inline int range_straddles_page_boundary(paddr_t p, size_t size) { extern unsigned long *contiguous_bitmap; return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) && !test_bit(p >> PAGE_SHIFT, contiguous_bitmap)); } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, enum dma_data_direction direction); extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction); extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction); extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction direction); #ifdef CONFIG_HIGHMEM extern dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction); #else #define dma_map_page(dev, page, offset, size, dir) \ dma_map_single(dev, page_address(page) + (offset), (size), (dir)) #define dma_unmap_page dma_unmap_single #endif extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); extern void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, 
unsigned long offset, size_t size, enum dma_data_direction direction) { dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction) { dma_sync_single_for_device(dev, dma_handle+offset, size, direction); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { if (swiotlb) swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction); flush_write_buffers(); } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { if (swiotlb) swiotlb_sync_sg_for_device(dev,sg,nelems,direction); flush_write_buffers(); } extern int dma_mapping_error(dma_addr_t dma_addr); extern int dma_supported(struct device *dev, u64 mask); static inline int dma_set_mask(struct device *dev, u64 mask) { if(!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } static inline int dma_get_cache_alignment(void) { /* no easy way to get cache size on all x86, so return the * maximum possible, to be safe */ return (1 << INTERNODE_CACHE_SHIFT); } #define dma_is_consistent(d) (1) static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction) { flush_write_buffers(); } #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags); extern void dma_release_declared_memory(struct device *dev); extern void * dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/fixmap.h000066400000000000000000000112041314037446600265730ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H /* used by vmalloc.c, vsyscall.lds.S. * * Leave one empty page between vmalloc'ed areas and * the start of the fixmap. */ extern unsigned long __FIXADDR_TOP; #ifndef __ASSEMBLY__ #include #include #include #include #ifdef CONFIG_HIGHMEM #include #include #endif /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. We allocate these special addresses * from the end of virtual memory (0xfffff000) backwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. 
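 *
 * Illustrative use of the Xen shared-info slot declared below (the
 * variable names here are examples from typical callers, not part of
 * this header):
 *
 *	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
 *	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
 *
 * Use set_fixmap_nocache() instead when the mapping must not be cached.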
*/ enum fixed_addresses { FIX_HOLE, #ifndef CONFIG_XEN #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif #endif #ifdef CONFIG_X86_VISWS_APIC FIX_CO_CPU, /* Cobalt timer */ FIX_CO_APIC, /* Cobalt APIC Redirection Table */ FIX_LI_PCIA, /* Lithium PCI Bridge A */ FIX_LI_PCIB, /* Lithium PCI Bridge B */ #endif #ifdef CONFIG_X86_F00F_BUG FIX_F00F_IDT, /* Virtual mapping for IDT */ #endif #ifdef CONFIG_X86_CYCLONE_TIMER FIX_CYCLONE_TIMER, /*cyclone timer register*/ #endif #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif #ifdef CONFIG_ACPI FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, #endif #ifdef CONFIG_PCI_MMCONFIG FIX_PCIE_MCFG, #endif FIX_SHARED_INFO, #define NR_FIX_ISAMAPS 256 FIX_ISAMAP_END, FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1, __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, FIX_WP_TEST, __end_of_fixed_addresses }; extern void __set_fixmap(enum fixed_addresses idx, maddr_t phys, pgprot_t flags); extern void set_fixaddr_top(void); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) /* Xen kernel does not dynamically relocate the fixmap */ #define VSYSCALL_RELOCATION 0 extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without tranlation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. */ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } static inline unsigned long virt_to_fix(const unsigned long vaddr) { BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); return __virt_to_fix(vaddr); } #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/floppy.h000066400000000000000000000073651314037446600266350ustar00rootroot00000000000000/* * Architecture specific parts of the Floppy driver * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 * * Modifications for Xen are Copyright (c) 2004, Keir Fraser. */ #ifndef __ASM_XEN_I386_FLOPPY_H #define __ASM_XEN_I386_FLOPPY_H #include /* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */ #include #undef MAX_DMA_ADDRESS #define MAX_DMA_ADDRESS 0 #define CROSS_64KB(a,s) (0) #define fd_inb(port) inb_p(port) #define fd_outb(value,port) outb_p(value,port) #define fd_request_dma() (0) #define fd_free_dma() ((void)0) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #define fd_get_dma_residue() (virtual_dma_count + virtual_dma_residue) #define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io) /* * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from * softirq context via motor_off_callback. A generic bug we happen to trigger. */ #define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size)) #define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) static int virtual_dma_count; static int virtual_dma_residue; static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) { register unsigned char st; register int lcount; register char *lptr; if (!doing_pdma) return floppy_interrupt(irq, dev_id, regs); st = 1; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { st=inb(virtual_dma_port+4) & 0xa0 ; if(st != 0xa0) break; if(virtual_dma_mode) outb_p(*lptr, virtual_dma_port+5); else *lptr = inb_p(virtual_dma_port+5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; st = inb(virtual_dma_port+4); if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; doing_pdma = 0; floppy_interrupt(irq, dev_id, regs); return IRQ_HANDLED; } return IRQ_HANDLED; } static void fd_disable_dma(void) { doing_pdma = 0; virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; } static int fd_request_irq(void) { return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, "floppy", NULL); } static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; return 0; } /* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */ #define FDC1 xen_floppy_init() static int FDC2 = -1; static int xen_floppy_init(void) { use_virtual_dma = 1; can_use_virtual_dma = 1; return 0x3f0; } /* * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
*/ #define FLOPPY0_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = (CMOS_READ(0x10) >> 4) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define FLOPPY1_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = CMOS_READ(0x10) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define N_FDC 2 #define N_DRIVE 8 #define FLOPPY_MOTOR_MASK 0xf0 #define EXTRA_FLOPPY_PARAMS #endif /* __ASM_XEN_I386_FLOPPY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/gnttab_dma.h000066400000000000000000000026471314037446600274220ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/highmem.h000066400000000000000000000041241314037446600267300ustar00rootroot00000000000000/* * highmem.h: virtual kernel memory mappings for high memory * * Used in CONFIG_HIGHMEM systems for memory pages which * are not addressable by direct kernel virtual addresses. * * Copyright (C) 1999 Gerhard Wichert, Siemens AG * Gerhard.Wichert@pdb.siemens.de * * * Redesigned the x86 32-bit VM architecture to deal with * up to 16 Terabyte physical memory. With current x86 CPUs * we now support up to 64 Gigabytes physical RAM. * * Copyright (C) 1999 Ingo Molnar */ #ifndef _ASM_HIGHMEM_H #define _ASM_HIGHMEM_H #ifdef __KERNEL__ #include #include #include #include /* declarations for highmem.c */ extern unsigned long highstart_pfn, highend_pfn; extern pte_t *kmap_pte; extern pgprot_t kmap_prot; extern pte_t *pkmap_page_table; /* * Right now we initialize only a single pte table. It can be extended * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
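 *
 * Worked numbers for the LAST_PKMAP values below, assuming 4K pages:
 * a single page of PTEs holds PAGE_SIZE / sizeof(pte_t) entries, i.e.
 * 4096 / 8 = 512 slots with PAE (64-bit PTEs) and 4096 / 4 = 1024 slots
 * without, which is where the 512/1024 persistent-kmap window sizes
 * come from.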
*/ #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else #define LAST_PKMAP 1024 #endif /* * Ordering is: * * FIXADDR_TOP * fixed_addresses * FIXADDR_START * temp fixed addresses * FIXADDR_BOOT_START * Persistent kmap area * PKMAP_BASE * VMALLOC_END * Vmalloc area * VMALLOC_START * high_memory */ #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) #define LAST_PKMAP_MASK (LAST_PKMAP-1) #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) extern void * FASTCALL(kmap_high(struct page *page)); extern void FASTCALL(kunmap_high(struct page *page)); void *kmap(struct page *page); void kunmap(struct page *page); void *kmap_atomic(struct page *page, enum km_type type); void *kmap_atomic_pte(struct page *page, enum km_type type); void kunmap_atomic(void *kvaddr, enum km_type type); void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); struct page *kmap_atomic_to_page(void *ptr); #define flush_cache_kmaps() do { } while (0) #endif /* __KERNEL__ */ #endif /* _ASM_HIGHMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/hw_irq.h000066400000000000000000000037161314037446600266110ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old arch/i386/kernel/irq.h to here. VY * * IRQ/IPI changes taken from work by Thomas Radke * */ #include #include #include #include struct hw_interrupt_type; /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 extern void (*interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP fastcall void reschedule_interrupt(void); fastcall void invalidate_interrupt(void); fastcall void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC fastcall void apic_timer_interrupt(void); fastcall void error_interrupt(void); fastcall void spurious_interrupt(void); fastcall void thermal_interrupt(struct pt_regs *); #define platform_legacy_irq(irq) ((irq) < 16) #endif void disable_8259A_irq(unsigned int irq); void enable_8259A_irq(unsigned int irq); int i8259A_irq_pending(unsigned int irq); void make_8259A_irq(unsigned int irq); void init_8259A(int aeoi); void FASTCALL(send_IPI_self(int vector)); void init_VISWS_APIC_irqs(void); void setup_IO_APIC(void); void disable_IO_APIC(void); #define print_IO_APIC() int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); void send_IPI(int dest, int vector); void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) extern void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i); static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { resend_irq_on_evtchn(h, i); } #endif /* _ASM_HW_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/hypercall.h000066400000000000000000000245761314037446600273120ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ __res; \ }) static inline int __must_check 
HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = 
_hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/hypervisor.h000066400000000000000000000160101314037446600275210ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) # ifdef CONFIG_X86_PAE # include # else # include # endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) # include #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. 
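 *
 * Illustrative caller, mirroring the AGP helpers earlier in this tree,
 * which need the GART table backed by machine frames addressable with
 * 32 bits:
 *
 *	rc = xen_create_contiguous_region((unsigned long)page_address(page),
 *	                                  0, 32);
 *	...
 *	xen_destroy_contiguous_region((unsigned long)page_address(page), 0);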
*/ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/io.h000066400000000000000000000300051314037446600257160ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). 
* * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. */ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define IO_SPACE_LIMIT 0xffff #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ #ifdef __KERNEL__ #include #include #include /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p /** * virt_to_phys - map virtual addresses to physical * @address: address to remap * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } /** * phys_to_virt - map physical address to virtual * @address: address to remap * * The returned virtual address is a current CPU mapping for * the memory address given. It is only valid to use this function on * addresses that have a kernel mapping * * This function does not handle bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline void * phys_to_virt(unsigned long address) { return __va(address); } /* * Change "struct page" to physical address. */ #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) #define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page))) #define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page))) #define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \ (unsigned long) bio_offset((bio))) #define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \ (unsigned long) (bv)->bv_offset) #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \ bvec_to_pseudophys((vec2)))) extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * ioremap performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. 
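 *
 * Illustrative sketch only (the physical base and register offsets are
 * hypothetical): callers normally pair ioremap() with iounmap() and use
 * the readX/writeX helpers defined later in this file, e.g.
 *
 *	void __iomem *regs = ioremap(dev_phys_base, 0x100);
 *	if (regs) {
 *		unsigned int id = readl(regs);
 *		writel(1, regs + 0x04);
 *		iounmap(regs);
 *	}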
*/ static inline void __iomem * ioremap(unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* * bt_ioremap() and bt_iounmap() are for temporary early boot-time * mappings, before the real ioremap() is functional. * A boot-time mapping is currently limited to at most 16 pages. */ extern void *bt_ioremap(unsigned long offset, unsigned long size); extern void bt_iounmap(void *addr, unsigned long size); /* Use early IO mappings for DMI because it's initialized early */ #define dmi_ioremap bt_ioremap #define dmi_iounmap bt_iounmap #define dmi_alloc alloc_bootmem /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); }) #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x #define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x)) /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus(_x) phys_to_machine(__pa(_x)) #define bus_to_virt(_x) __va(machine_to_phys(_x)) /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. */ static inline unsigned char readb(const volatile void __iomem *addr) { return *(volatile unsigned char __force *) addr; } static inline unsigned short readw(const volatile void __iomem *addr) { return *(volatile unsigned short __force *) addr; } static inline unsigned int readl(const volatile void __iomem *addr) { return *(volatile unsigned int __force *) addr; } #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl static inline void writeb(unsigned char b, volatile void __iomem *addr) { *(volatile unsigned char __force *) addr = b; } static inline void writew(unsigned short b, volatile void __iomem *addr) { *(volatile unsigned short __force *) addr = b; } static inline void writel(unsigned int b, volatile void __iomem *addr) { *(volatile unsigned int __force *) addr = b; } #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define mmiowb() static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { memset((void __force *) addr, val, count); } static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) { __memcpy(dst, (void __force *) src, count); } static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) { __memcpy((void __force *) dst, src, count); } /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. 
The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN))) #define isa_readb(a) readb(__ISA_IO_base + (a)) #define isa_readw(a) readw(__ISA_IO_base + (a)) #define isa_readl(a) readl(__ISA_IO_base + (a)) #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) /* * Again, i386 does not require mem IO specific function. */ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(volatile void __iomem * io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* * Cache management * * This needed for two cases * 1. Out of order aware processors * 2. Accidentally out of order processors (PPro errata #51) */ #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) static inline void flush_write_buffers(void) { __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); } #define dma_cache_inv(_start,_size) flush_write_buffers() #define dma_cache_wback(_start,_size) flush_write_buffers() #define dma_cache_wback_inv(_start,_size) flush_write_buffers() #else /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() #endif #endif /* __KERNEL__ */ #ifdef SLOW_IO_BY_JUMPING #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" #else #define __SLOW_DOWN_IO "outb %%al,$0x80;" #endif static inline void slow_down_io(void) { __asm__ __volatile__( __SLOW_DOWN_IO #ifdef REALLY_SLOW_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO #endif : : ); } #ifdef CONFIG_X86_NUMAQ extern void *xquad_portio; /* Where the IO area was mapped */ #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ if (xquad_portio) \ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ else \ out##bwl##_local(value, port); \ } \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_quad(value, port, 0); \ } \ static inline unsigned type in##bwl##_quad(int port, int quad) { \ if (xquad_portio) \ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ else \ return in##bwl##_local(port); \ } \ static inline unsigned type in##bwl(int port) { \ return in##bwl##_quad(port, 0); \ } #else #define __BUILDIO(bwl,bw,type) \ static inline void out##bwl(unsigned type value, int port) { \ out##bwl##_local(value, port); \ } \ static inline unsigned type in##bwl(int port) { \ 
return in##bwl##_local(port); \ } #endif #define BUILDIO(bwl,bw,type) \ static inline void out##bwl##_local(unsigned type value, int port) { \ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ } \ static inline unsigned type in##bwl##_local(int port) { \ unsigned type value; \ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ return value; \ } \ static inline void out##bwl##_local_p(unsigned type value, int port) { \ out##bwl##_local(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_local_p(int port) { \ unsigned type value = in##bwl##_local(port); \ slow_down_io(); \ return value; \ } \ __BUILDIO(bwl,bw,type) \ static inline void out##bwl##_p(unsigned type value, int port) { \ out##bwl(value, port); \ slow_down_io(); \ } \ static inline unsigned type in##bwl##_p(int port) { \ unsigned type value = in##bwl(port); \ slow_down_io(); \ return value; \ } \ static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ } \ static inline void ins##bwl(int port, void *addr, unsigned long count) { \ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ } BUILDIO(b,b,char) BUILDIO(w,w,short) BUILDIO(l,,int) /* We will be supplying our own /dev/mem implementation */ #define ARCH_HAS_DEV_MEM #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/maddr.h000066400000000000000000000133331314037446600264030ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(max_mapnr && pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(max_mapnr && pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. 
Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < max_mapnr) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(max_mapnr && pfn >= max_mapnr); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. 
*/ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #endif #ifdef CONFIG_X86_PAE #define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } ) static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } #else #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/mmu.h000066400000000000000000000012011314037446600261010ustar00rootroot00000000000000#ifndef __i386_MMU_H #define __i386_MMU_H #include /* * The i386 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { int size; struct semaphore sem; void *ldt; #ifdef CONFIG_XEN int has_foreign_mappings; #endif } mm_context_t; /* mm/memory.c:exit_mmap hook */ extern void _arch_exit_mmap(struct mm_struct *mm); #define arch_exit_mmap(_mm) _arch_exit_mmap(_mm) /* kernel/fork.c:dup_mmap hook */ extern void _arch_dup_mmap(struct mm_struct *mm); #define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/mmu_context.h000066400000000000000000000055661314037446600276670ustar00rootroot00000000000000#ifndef __I386_SCHED_H #define __I386_SCHED_H #include #include #include #include /* * Used for LDT copy/destruction. */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #if 0 /* XEN: no lazy tlb */ unsigned cpu = smp_processor_id(); if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; #endif } #define prepare_arch_switch(next) __prepare_arch_switch() static inline void __prepare_arch_switch(void) { /* * Save away %fs and %gs. No need to save %es and %ds, as those * are always kernel segments while inside the kernel. Must * happen before reload of cr3/ldt (i.e., not in __switch_to). 
*/ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1" : "=m" (current->thread.fs), "=m" (current->thread.gs)); asm volatile ( "movl %0,%%fs ; movl %0,%%gs" : : "r" (0) ); } extern void mm_pin(struct mm_struct *mm); extern void mm_unpin(struct mm_struct *mm); void mm_pin_all(void); static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { int cpu = smp_processor_id(); struct mmuext_op _op[2], *op = _op; if (likely(prev != next)) { BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags)); /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #if 0 /* XEN: no lazy tlb */ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; per_cpu(cpu_tlbstate, cpu).active_mm = next; #endif cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables: load_cr3(next->pgd) */ op->cmd = MMUEXT_NEW_BASEPTR; op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT); op++; /* * load the LDT, if the LDT is different: */ if (unlikely(prev->context.ldt != next->context.ldt)) { /* load_LDT_nolock(&next->context, cpu) */ op->cmd = MMUEXT_SET_LDT; op->arg1.linear_addr = (unsigned long)next->context.ldt; op->arg2.nr_ents = next->context.size; op++; } BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF)); } #if 0 /* XEN: no lazy tlb */ else { per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk, mm) \ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags)) mm_pin(next); switch_mm(prev, next, NULL); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/page.h000066400000000000000000000156041314037446600262330ustar00rootroot00000000000000#ifndef _I386_PAGE_H #define _I386_PAGE_H /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #ifdef CONFIG_X86_PAE #define __PHYSICAL_MASK_SHIFT 40 #define __PHYSICAL_MASK ((1ULL << __PHYSICAL_MASK_SHIFT) - 1) #define PHYSICAL_PAGE_MASK (~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK) #else #define __PHYSICAL_MASK_SHIFT 32 #define __PHYSICAL_MASK (~0UL) #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) #endif #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #ifdef __KERNEL__ /* * Need to repeat this here in order to not include pgtable.h (which in turn * depends on definitions made here), but to be able to use the symbolic * below. The preprocessor will warn if the two definitions aren't identical. */ #define _PAGE_PRESENT 0x001 #ifndef __ASSEMBLY__ #include #include #include #include #include #include #ifdef CONFIG_X86_USE_3DNOW #include #define clear_page(page) mmx_clear_page((void *)(page)) #define copy_page(to,from) mmx_copy_page(to,from) #else #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * On older X86 processors it's not a win to use MMX here it seems. * Maybe the K6-III ? 
*/ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) #endif #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) /* * These are used to make use of C type-checking.. */ extern int nx_enabled; #ifdef CONFIG_X86_PAE extern unsigned long long __supported_pte_mask; typedef struct { unsigned long pte_low, pte_high; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgprot; } pgprot_t; #define pgprot_val(x) ((x).pgprot) #include #define __pte(x) ({ unsigned long long _x = (x); \ if (_x & _PAGE_PRESENT) _x = pte_phys_to_machine(_x); \ ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); }) #define __pgd(x) ({ unsigned long long _x = (x); \ (pgd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; }) #define __pmd(x) ({ unsigned long long _x = (x); \ (pmd_t) {((_x) & _PAGE_PRESENT) ? pte_phys_to_machine(_x) : (_x)}; }) static inline unsigned long long __pte_val(pte_t x) { return ((unsigned long long)x.pte_high << 32) | x.pte_low; } static inline unsigned long long pte_val(pte_t x) { unsigned long long ret = __pte_val(x); if (x.pte_low & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define __pmd_val(x) ((x).pmd) static inline unsigned long long pmd_val(pmd_t x) { unsigned long long ret = __pmd_val(x); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT; #else if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); #endif return ret; } #define __pud_val(x) __pgd_val((x).pgd) #define __pgd_val(x) ((x).pgd) static inline unsigned long long pgd_val(pgd_t x) { unsigned long long ret = __pgd_val(x); if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define HPAGE_SHIFT 21 #else typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pgprot_val(x) ((x).pgprot) #include #define boot_pte_t pte_t /* or would you rather have a typedef */ #define __pte_val(x) ((x).pte_low) #define pte_val(x) (__pte_val(x) & _PAGE_PRESENT ? \ machine_to_phys(__pte_val(x)) : \ __pte_val(x)) #define __pte(x) ({ unsigned long _x = (x); \ (pte_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; }) #define __pmd_val(x) __pud_val((x).pud) #define __pud_val(x) __pgd_val((x).pgd) #define __pgd(x) ({ unsigned long _x = (x); \ (pgd_t) {((_x) & _PAGE_PRESENT) ? phys_to_machine(_x) : (_x)}; }) #define __pgd_val(x) ((x).pgd) static inline unsigned long pgd_val(pgd_t x) { unsigned long ret = __pgd_val(x); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret) ret = machine_to_phys(ret) | _PAGE_PRESENT; #else if (ret & _PAGE_PRESENT) ret = machine_to_phys(ret); #endif return ret; } #define HPAGE_SHIFT 22 #endif #define PTE_MASK PHYSICAL_PAGE_MASK #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif #define __pgprot(x) ((pgprot_t) { (x) } ) #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* * This handles the memory map.. We could make this a config * option, but too many people screw it up, and too few need * it. 
* * A __PAGE_OFFSET of 0xC0000000 means that the kernel has * a virtual address space of one gigabyte, which limits the * amount of physical memory you can use to about 950MB. * * If you want more physical memory than this then see the CONFIG_HIGHMEM4G * and CONFIG_HIGHMEM64G options in the kernel configuration. */ #ifndef __ASSEMBLY__ /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. */ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern int page_is_ram(unsigned long pagenr); #endif /* __ASSEMBLY__ */ #ifdef __ASSEMBLY__ #define __PAGE_OFFSET CONFIG_PAGE_OFFSET #define __PHYSICAL_START CONFIG_PHYSICAL_START #else #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #endif #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) #if CONFIG_XEN_COMPAT <= 0x030002 #undef LOAD_OFFSET #define LOAD_OFFSET 0 #endif #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) #define MAXMEM (__FIXADDR_TOP-__PAGE_OFFSET-__VMALLOC_RESERVE) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (VM_READ | VM_WRITE | \ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #include #endif /* _I386_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pci.h000066400000000000000000000075131314037446600260720ustar00rootroot00000000000000#ifndef __i386_PCI_H #define __i386_PCI_H #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #include #define pcibios_scan_all_fns(a, b) (!is_initial_xendomain()) extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. */ #include #include #include #include #include struct pci_dev; #ifdef CONFIG_SWIOTLB /* On Xen we use SWIOTLB instead of blk-specific bounce buffers. 
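 *
 * Illustrative driver-side use of the unmap bookkeeping macros defined
 * below (the structure and variable names are hypothetical; pci_unmap_single
 * and PCI_DMA_TODEVICE come from the generic PCI DMA API):
 *
 *	struct ring_entry {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *		DECLARE_PCI_UNMAP_LEN(len)
 *	};
 *
 *	pci_unmap_addr_set(entry, mapping, dma_handle);
 *	pci_unmap_len_set(entry, len, size);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(entry, mapping),
 *			 pci_unmap_len(entry, len), PCI_DMA_TODEVICE);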
*/ #define PCI_DMA_BUS_IS_PHYS (0) #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ __u32 LEN_NAME; #define pci_unmap_addr(PTR, ADDR_NAME) \ ((PTR)->ADDR_NAME) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ (((PTR)->ADDR_NAME) = (VAL)) #define pci_unmap_len(PTR, LEN_NAME) \ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) #else /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. */ #define PCI_DMA_BUS_IS_PHYS (1) /* pci_unmap_{page,single} is a nop so... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif /* This is always fine. */ #define pci_dac_dma_supported(pci_dev, mask) (1) static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return pfn_to_page(dma_addr >> PAGE_SHIFT); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #endif /* __KERNEL__ */ #ifdef CONFIG_XEN_PCIDEV_FRONTEND #include #endif /* CONFIG_XEN_PCIDEV_FRONTEND */ /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include /* generic pci stuff */ #include #endif /* __i386_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgalloc.h000066400000000000000000000033441314037446600267360ustar00rootroot00000000000000#ifndef _I386_PGALLOC_H #define _I386_PGALLOC_H #include #include #include /* for struct page */ #include /* for phys_to_virt and page_to_pseudophys */ #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) #define pmd_populate(mm, pmd, pte) \ do { \ unsigned long pfn = page_to_pfn(pte); \ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \ if (!PageHighMem(pte)) \ BUG_ON(HYPERVISOR_update_va_mapping( \ (unsigned long)__va(pfn << PAGE_SHIFT), \ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \ kmap_flush_unused(); \ set_pmd(pmd, \ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \ } else \ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \ } while (0) /* * Allocate and free page tables. 
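 *
 * Illustrative pairing only (not a call site from this tree): the
 * allocation and free helpers declared below are used together as
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	pte_t *pte = pte_alloc_one_kernel(mm, address);
 *	...
 *	pte_free_kernel(pte);
 *	pgd_free(pgd);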
*/ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); static inline void pte_free_kernel(pte_t *pte) { free_page((unsigned long)pte); make_lowmem_page_writable(pte, XENFEAT_writable_page_tables); } extern void pte_free(struct page *pte); #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. */ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(x) do { } while (0) #define __pmd_free_tlb(tlb,x) do { } while (0) #define pud_populate(mm, pmd, pte) BUG() #endif #define check_pgt_cache() do { } while (0) #endif /* _I386_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h000066400000000000000000000005641314037446600310420ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_DEFS_H #define _I386_PGTABLE_2LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 0 /* * traditional i386 two-level paging structure: */ #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 /* * the i386 is two-level, so we don't really have any * PMD directory physically. */ #define PTRS_PER_PTE 1024 #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgtable-2level.h000066400000000000000000000067531314037446600301310ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_H #define _I386_PGTABLE_2LEVEL_H #include #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx (pfn %05lx).\n", __FILE__, __LINE__, \ __pte_val(e), pte_pfn(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx (pfn %05lx).\n", __FILE__, __LINE__, \ __pgd_val(e), pgd_val(e) >> PAGE_SHIFT) /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. 
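 *
 * Illustrative only: generic mm code updates a slot through the hooks
 * rather than storing to it directly, e.g.
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, prot));
 *
 * which lets the Xen-aware definitions below route the write through
 * HYPERVISOR_update_va_mapping when the target mm is the current or
 * init mm, falling back to a plain set_pte() otherwise.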
*/ #define set_pte(pteptr, pteval) (*(pteptr) = pteval) #define set_pte_at(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \ set_pte((ptep), (pteval)); \ } while (0) #define set_pte_at_sync(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \ set_pte((ptep), (pteval)); \ xen_invlpg((addr)); \ } \ } while (0) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pte_none(x) (!(x).pte_low) static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (!pte_none(pte)) { if ((mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) pte = __pte_ma(xchg(&ptep->pte_low, 0)); } return pte; } #define ptep_clear_flush(vma, addr, ptep) \ ({ \ pte_t *__ptep = (ptep); \ pte_t __res = *__ptep; \ if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI))) { \ __ptep->pte_low = 0; \ flush_tlb_page(vma, addr); \ } \ __res; \ }) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define __pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT) #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte))) #define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte)) #define pte_page(_pte) pfn_to_page(pte_pfn(_pte)) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) /* * All present user pages are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte); } /* * All present pages are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return 1; } /* * Bits 0, 6 and 7 are taken, split up the 29 bits of offset * into this range: */ #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) \ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) #define pgoff_to_pte(off) \ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) void vmalloc_sync_all(void); #endif /* _I386_PGTABLE_2LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h000066400000000000000000000007331314037446600310410ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_DEFS_H #define _I386_PGTABLE_3LEVEL_DEFS_H #define HAVE_SHARED_KERNEL_PMD 0 /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ 
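/*
 * Worked example of the split implied by the PAE constants above
 * (illustrative only, for a 32-bit virtual address with 4KB pages):
 *
 *	bits 31-30  index into the 4-entry page directory   (PGDIR_SHIFT = 30)
 *	bits 29-21  index into a 512-entry middle directory (PMD_SHIFT = 21)
 *	bits 20-12  index into a 512-entry page table
 *	bits 11-0   byte offset within the page
 */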
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgtable-3level.h000066400000000000000000000136541314037446600301300ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_H #define _I386_PGTABLE_3LEVEL_H #include /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. * * Copyright (C) 1999 Ingo Molnar */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \ &(e), __pte_val(e), pte_pfn(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT) #define pud_none(pud) 0 #define pud_bad(pud) 0 #define pud_present(pud) 1 /* * Is the pte executable? */ static inline int pte_x(pte_t pte) { return !(__pte_val(pte) & _PAGE_NX); } /* * All present user-pages with !NX bit are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte) && pte_x(pte); } /* * All present pages with !NX bit are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return pte_x(pte); } /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben */ #define __HAVE_ARCH_SET_PTE_ATOMIC static inline void set_pte(pte_t *ptep, pte_t pte) { ptep->pte_high = pte.pte_high; smp_wmb(); ptep->pte_low = pte.pte_low; } #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),__pte_val(pteval)) #define set_pte_at(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \ set_pte((ptep), (pteval)); \ } while (0) #define set_pte_at_sync(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \ set_pte((ptep), (pteval)); \ xen_invlpg((addr)); \ } \ } while (0) #define set_pmd(pmdptr,pmdval) \ xen_l2_entry_update((pmdptr), (pmdval)) #define set_pud(pudptr,pudval) \ xen_l3_entry_update((pudptr), (pudval)) /* * Pentium-II erratum A13: in PAE mode we explicitly have to flush * the TLB via cr3 if the top-level pgd is changed... * We do not let the generic code free and clear pgd entries due to * this erratum. */ static inline void pud_clear (pud_t * pud) { } #define pud_page(pud) \ ((struct page *) __va(pud_val(pud) & PAGE_MASK)) #define pud_page_kernel(pud) \ ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) /* Find an entry in the second-level page table.. */ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ pmd_index(address)) static inline int pte_none(pte_t pte) { return !(pte.pte_low | pte.pte_high); } /* * For PTEs and PDEs, we must clear the P-bit first when clearing a page table * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. 
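 *
 * Illustrative ordering (this is what the helpers below do):
 *
 *	ptep->pte_low = 0;	(present bit now clear)
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * Zeroing the high word first would briefly expose a present entry
 * pointing at the wrong frame to the hardware walker.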
*/ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { if ((mm != current->mm && mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) { ptep->pte_low = 0; smp_wmb(); ptep->pte_high = 0; } } #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (!pte_none(pte)) { if ((mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) { uint64_t val = __pte_val(pte); if (__cmpxchg64(ptep, val, 0) != val) { /* xchg acts as a barrier before the setting of the high bits */ pte.pte_low = xchg(&ptep->pte_low, 0); pte.pte_high = ptep->pte_high; ptep->pte_high = 0; } } } return pte; } #define ptep_clear_flush(vma, addr, ptep) \ ({ \ pte_t *__ptep = (ptep); \ pte_t __res = *__ptep; \ if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI))) { \ __ptep->pte_low = 0; \ smp_wmb(); \ __ptep->pte_high = 0; \ flush_tlb_page(vma, addr); \ } \ __res; \ }) static inline int pte_same(pte_t a, pte_t b) { return a.pte_low == b.pte_low && a.pte_high == b.pte_high; } #define pte_page(x) pfn_to_page(pte_pfn(x)) #define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \ ((_pte).pte_high << (32-PAGE_SHIFT))) #define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte))) #define pte_pfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte)) extern unsigned long long __supported_pte_mask; static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { return __pte((((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & __supported_pte_mask); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & __supported_pte_mask); } /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) #define __pmd_free_tlb(tlb, x) do { } while (0) void vmalloc_sync_all(void); #endif /* _I386_PGTABLE_3LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/pgtable.h000066400000000000000000000440321314037446600267320ustar00rootroot00000000000000#ifndef _I386_PGTABLE_H #define _I386_PGTABLE_H #include /* * The Linux memory management assumes a three-level page table setup. On * the i386, we use that, but "fold" the mid level into the top-level page * table, so that we physically have the same two-level page table as the * i386 mmu expects. * * This file contains the functions and defines necessary to modify and use * the i386 page table tree. */ #ifndef __ASSEMBLY__ #include #include #include #ifndef _I386_BITOPS_H #include #endif #include #include #include /* Is this pagetable pinned? 
*/ #define PG_pinned PG_arch_1 struct mm_struct; struct vm_area_struct; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t *swapper_pg_dir; extern kmem_cache_t *pgd_cache; extern kmem_cache_t *pmd_cache; extern spinlock_t pgd_lock; extern struct page *pgd_list; void pmd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); /* * The Linux x86 paging architecture is 'compile-time dual-mode', it * implements both the traditional 2-level x86 page tables and the * newer 3-level PAE-mode page tables. */ #ifdef CONFIG_X86_PAE # include # define PMD_SIZE (1UL << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE-1)) #else # include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) #define TWOLEVEL_PGDIR_SHIFT 22 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) /* Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) #else # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif /* * _PAGE_PSE set in the page directory entry just means that * the page directory entry points directly to a 4MB-aligned block of * memory. */ #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_UNUSED1 0x200 /* available for programmer */ #define _PAGE_UNUSED2 0x400 #define _PAGE_UNUSED3 0x800 /* If _PAGE_PRESENT is clear, we use these: */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; pte_present gives true */ #ifdef CONFIG_X86_PAE #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) #else #define _PAGE_NX 0 #endif #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE \ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY \ PAGE_COPY_NOEXEC #define PAGE_READONLY \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define _PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define _PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) /* * The i386 can't do page protection for execute, and considers that * the same are read. Also, write permissions imply read permissions. * This is the closest we can get.. */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are * done without a 'access_ok(VERIFY_WRITE,..)' */ #undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)__pmd_val(x)) #if CONFIG_XEN_COMPAT <= 0x030002 /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t. can temporarily clear it. 
*/ #define pmd_present(x) (__pmd_val(x)) #define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT)) #else #define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT) #define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #endif #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ #define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT) static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; } /* * The following only works if pte_present() is not true. */ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; } #ifdef CONFIG_X86_PAE # include #else # include #endif #define ptep_test_and_clear_dirty(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_dirty(__pte); \ if (__ret) { \ __pte = pte_mkclean(__pte); \ if ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \ (ptep)->pte_low = __pte.pte_low; \ } \ __ret; \ }) #define ptep_test_and_clear_young(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_young(__pte); \ if (__ret) \ __pte = pte_mkold(__pte); \ if ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \ (ptep)->pte_low = __pte.pte_low; \ __ret; \ }) #define ptep_get_and_clear_full(mm, addr, ptep, full) \ ((full) ? ({ \ pte_t __res = *(ptep); \ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \ xen_l1_entry_update(ptep, __pte(0)); \ else \ *(ptep) = __pte(0); \ __res; \ }) : \ ptep_get_and_clear(mm, addr, ptep)) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (pte_write(pte)) set_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * * dst - pointer to pgd range anwhere on a pgd page * src - "" * count - the number of pgds to copy. * * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. 
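 *
 * Illustrative only: pgd setup code conventionally uses it to copy the
 * kernel portion of swapper_pg_dir into a freshly allocated pgd,
 * something like
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);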
*/ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { memcpy(dst, src, count * sizeof(pgd_t)); } /* * Macro to mark a page protection value as "uncacheable". On processors which do not support * it, this is a no-op. */ #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { /* * Since this might change the present bit (which controls whether * a pte_t object has undergone p2m translation), we must use * pte_val() on the input pte and __pte() for the return value. */ paddr_t pteval = pte_val(pte); pteval &= _PAGE_CHG_MASK; pteval |= pgprot_val(newprot); #ifdef CONFIG_X86_PAE pteval &= __supported_pte_mask; #endif return __pte(pteval); } #define pmd_large(pmd) \ ((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * * this macro returns the index of the entry in the pgd page which would * control the given virtual address */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index_k(addr) pgd_index(addr) /* * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] * * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ #define pmd_index(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] * * this macro returns the index of the entry in the pte page which would * control the given virtual address */ #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) \ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page_kernel(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. */ extern pte_t *lookup_address(unsigned long address); /* * Make a given kernel text page executable/non-executable. * Returns the previous executability setting of that page (which * is used to restore the previous state). Used by the SMP bootup code. * NOTE: this is an __init function for security reasons. 
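 *
 * Illustrative sketch only (the trampoline variable is hypothetical):
 *
 *	int was_exec = set_kernel_exec(trampoline_va, 1);
 *	... start the secondary CPU through the trampoline page ...
 *	set_kernel_exec(trampoline_va, was_exec);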
*/ #ifdef CONFIG_X86_PAE extern int set_kernel_exec(unsigned long vaddr, int enable); #else static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} #endif extern void noexec_setup(const char *str); #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \ pte_index(address)) #define pte_offset_map_nested(dir, address) \ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \ pte_index(address)) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #endif #define __HAVE_ARCH_PTEP_ESTABLISH #define ptep_establish(vma, address, ptep, pteval) \ do { \ if ( likely((vma)->vm_mm == current->mm) ) { \ BUG_ON(HYPERVISOR_update_va_mapping(address, \ pteval, \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI)); \ } else { \ xen_l1_entry_update(ptep, pteval); \ flush_tlb_page(vma, address); \ } \ } while (0) /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. * * Also, we only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. */ #define update_mmu_cache(vma,address,pte) do { } while (0) #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ do { \ if (dirty) \ ptep_establish(vma, address, ptep, entry); \ } while (0) #include void make_lowmem_page_readonly(void *va, unsigned int feature); void make_lowmem_page_writable(void *va, unsigned int feature); void make_page_readonly(void *va, unsigned int feature); void make_page_writable(void *va, unsigned int feature); void make_pages_readonly(void *va, unsigned int nr, unsigned int feature); void make_pages_writable(void *va, unsigned int nr, unsigned int feature); #define virt_to_ptep(__va) \ ({ \ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \ pte_offset_kernel(__pmd, (unsigned long)(__va)); \ }) #define arbitrary_virt_to_machine(__va) \ ({ \ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \ }) #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ int direct_remap_pfn_range(struct vm_area_struct *vma, unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int direct_kernel_remap_pfn_range(unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int create_lookup_pte_addr(struct mm_struct *mm, unsigned long address, uint64_t *ptep); int touch_pte_range(struct mm_struct *mm, unsigned long address, unsigned long size); int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot); #define arch_change_pte_range(mm, pmd, addr, end, newprot) \ xen_change_pte_range(mm, pmd, addr, end, newprot) #define io_remap_pfn_range(vma,from,pfn,size,prot) \ 
direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME #include #endif /* _I386_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/processor.h000066400000000000000000000475551314037446600273500ustar00rootroot00000000000000/* * include/asm-i386/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_I386_PROCESSOR_H #define __ASM_I386_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include /* flag for disabling the tsc */ extern int tsc_disable; struct desc_struct { unsigned long a,b; }; #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. * Members of this structure are referenced in head.S, so think twice * before touching them. [mj] */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; char wp_works_ok; /* It doesn't on 386's */ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ char hard_math; char rfu; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ unsigned long x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB - valid for CPUS which support this call */ int x86_cache_alignment; /* In bytes */ char fdiv_bug; char f00f_bug; char coma_bug; char pad0; int x86_power; unsigned long loops_per_jiffy; unsigned char x86_max_cores; /* cpuid returned max cores value */ unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char apicid; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 #define X86_VENDOR_NUM 9 #define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; #ifndef CONFIG_X86_NO_TSS extern struct tss_struct doublefault_tss; DECLARE_PER_CPU(struct tss_struct, init_tss); #endif #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern int phys_proc_id[NR_CPUS]; extern int cpu_core_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); #else static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* 
Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Generic CPUID function * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx * resulting in stale register contents being returned. */ static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c"(0)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__(XEN_CPUID : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__(XEN_CPUID : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__(XEN_CPUID : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__(XEN_CPUID : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define load_cr3(pgdir) write_cr3(__pa(pgdir)) /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. 
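 *
 * Illustrative use only: a boot-time caller enabling page size extensions
 * might do, roughly,
 *
 *   if (cpu_has_pse)
 *           set_in_cr4(X86_CR4_PSE);
 *
 * so that the bit is also recorded in mmu_cr4_features for CPUs that come
 * up later (cpu_has_pse is assumed from cpufeature.h).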
*/ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; write_cr4(cr4); } static inline void clear_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; write_cr4(cr4); } /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_PCR0 0x20 #define CX86_GCR 0xb8 #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_PCR1 0xf0 #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; /* Boot loader type from the setup header */ extern int bootloader_type; /* * User space process size: 3GB (default). */ #define TASK_SIZE (PAGE_OFFSET) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (current->map_base) #define __TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3) #define HAVE_ARCH_PICK_MMAP_LAYOUT /* * Size of io_bitmap. 
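 *
 * (Worked out for reference: 65536 I/O ports give IO_BITMAP_BYTES =
 * 65536/8 = 8192 bytes, i.e. IO_BITMAP_LONGS = 8192/4 = 2048 longs on
 * i386; struct tss_struct below allocates one extra long so that the
 * terminating all-ones byte the CPU reads past the end of the bitmap
 * still fits inside the limit.)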
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #ifndef CONFIG_X86_NO_TSS #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #endif #define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long mxcsr_mask; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; } __attribute__ ((aligned (16))); struct i387_soft_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ unsigned char ftop, changed, lookahead, no_update, rm, alimit; struct info *info; unsigned long entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; typedef struct { unsigned long seg; } mm_segment_t; struct thread_struct; #ifndef CONFIG_X86_NO_TSS struct tss_struct { unsigned short back_link,__blh; unsigned long esp0; unsigned short ss0,__ss0h; unsigned long esp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ unsigned long esp2; unsigned short ss2,__ss2h; unsigned long __cr3; unsigned long eip; unsigned long eflags; unsigned long eax,ecx,edx,ebx; unsigned long esp; unsigned long ebp; unsigned long esi; unsigned long edi; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; unsigned short ldt, __ldth; unsigned short trace, io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ unsigned long io_bitmap_max; struct thread_struct *io_bitmap_owner; /* * pads the TSS to be cacheline-aligned (size is 0x100) */ unsigned long __cacheline_filler[35]; /* * .. and then another 0x100 bytes for emergency kernel stack */ unsigned long stack[64]; } __attribute__((packed)); #endif #define ARCH_MIN_TASKALIGN 16 struct thread_struct { /* cached TLS descriptors. 
*/ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; unsigned long esp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ unsigned long debugreg[8]; /* %%db0-7 debug registers */ /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387; /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; unsigned long v86flags, v86mask, saved_esp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* max allowed port in the bitmap, in bytes: */ unsigned long io_bitmap_max; }; #define INIT_THREAD { \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } #ifndef CONFIG_X86_NO_TSS /* * Note that the .io_bitmap member must be extra-big. This is because * the CPU will access an additional byte beyond the end of the IO * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ #define INIT_TSS { \ .esp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->ss1 != thread->sysenter_cs)) { tss->ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } } #define load_esp0(tss, thread) \ __load_esp0(tss, thread) #else #define load_esp0(tss, thread) do { \ if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \ BUG(); \ } while (0) #endif #define start_thread(regs, new_eip, new_esp) do { \ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ regs->xss = __USER_DS; \ regs->xcs = __USER_CS; \ regs->eip = new_eip; \ regs->esp = new_esp; \ } while (0) /* * These special macros can be used to get or set a debugging register */ #define get_debugreg(var, register) \ (var) = HYPERVISOR_get_debugreg((register)) #define set_debugreg(value, register) \ WARN_ON(HYPERVISOR_set_debugreg((register), (value))) /* * Set IOPL bits in EFLAGS from given mask */ static inline void set_iopl_mask(unsigned mask) { struct physdev_set_iopl set_iopl; /* Force the change at ring 0. */ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl)); } /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) #define KSTK_TOP(info) \ ({ \ unsigned long *__ptr = (unsigned long *)(info); \ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ }) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. 
* This is necessary to guarantee that the entire "struct pt_regs" * is accessable even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). * Therefore beware: accessing the xss/esp fields of the * "struct pt_regs" is possible, but they may contain the * completely wrong values. */ #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ __regs__ - 1; \ }) #define KSTK_EIP(task) (task_pt_regs(task)->eip) #define KSTK_ESP(task) (task_pt_regs(task)->esp) struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ #define MICROCODE_IOCFREE _IO('6',0) /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } #define cpu_relax() rep_nop() /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" #define GENERIC_NOP2 ".byte 0x89,0xf6\n" #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 /* Opteron nops */ #define K8_NOP1 GENERIC_NOP1 #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 /* K7 nops */ /* uses eax dependencies (arbitary choice) */ #define K7_NOP1 GENERIC_NOP1 #define K7_NOP2 ".byte 0x8b,0xc0\n" #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" #define K7_NOP5 K7_NOP4 ASM_NOP1 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" #define K7_NOP8 K7_NOP7 ASM_NOP1 #ifdef CONFIG_MK8 #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 #elif defined(CONFIG_MK7) #define ASM_NOP1 K7_NOP1 #define ASM_NOP2 K7_NOP2 #define ASM_NOP3 K7_NOP3 #define ASM_NOP4 K7_NOP4 #define ASM_NOP5 K7_NOP5 #define ASM_NOP6 K7_NOP6 #define ASM_NOP7 K7_NOP7 #define ASM_NOP8 K7_NOP8 #else #define ASM_NOP1 GENERIC_NOP1 #define ASM_NOP2 GENERIC_NOP2 #define ASM_NOP3 GENERIC_NOP3 #define ASM_NOP4 GENERIC_NOP4 #define ASM_NOP5 GENERIC_NOP5 #define ASM_NOP6 GENERIC_NOP6 #define ASM_NOP7 GENERIC_NOP7 #define ASM_NOP8 GENERIC_NOP8 #endif #define ASM_NOP_MAX 8 /* Prefetch instructions for Pentium III and AMD Athlon */ /* It's not worth to care about 3dnow! 
prefetches for the K6 because they are microcoded there and very slow. However we don't do prefetches for pre XP Athlons currently That should be fixed. */ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *x) { alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM, "r" (x)); } #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH /* 3dnow! prefetch to get an exclusive cache line. Useful for spinlocks to avoid one state transition in the cache coherency protocol. */ static inline void prefetchw(const void *x) { alternative_input(ASM_NOP4, "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define spin_lock_prefetch(x) prefetchw(x) extern void select_idle_routine(const struct cpuinfo_x86 *c); #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); #ifdef CONFIG_MTRR extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); #else #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) #endif #ifdef CONFIG_X86_MCE extern void mcheck_init(struct cpuinfo_x86 *c); #else #define mcheck_init(c) do {} while(0) #endif #endif /* __ASM_I386_PROCESSOR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/ptrace.h000066400000000000000000000036521314037446600265750ustar00rootroot00000000000000#ifndef _I386_PTRACE_H #define _I386_PTRACE_H #define EBX 0 #define ECX 1 #define EDX 2 #define ESI 3 #define EDI 4 #define EBP 5 #define EAX 6 #define DS 7 #define ES 8 #define FS 9 #define GS 10 #define ORIG_EAX 11 #define EIP 12 #define CS 13 #define EFL 14 #define UESP 15 #define SS 16 #define FRAME_SIZE 17 /* this struct defines the way the registers are stored on the stack during a system call. */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 #define PTRACE_OLDSETOPTIONS 21 #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 #ifdef __KERNEL__ #include struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); /* * user_mode_vm(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check * if they have already ruled out V8086 mode, so user_mode(regs) can be used. 
*/ static inline int user_mode(struct pt_regs *regs) { return (regs->xcs & 2) != 0; } static inline int user_mode_vm(struct pt_regs *regs) { return ((regs->xcs & 2) | (regs->eflags & VM_MASK)) != 0; } #define instruction_pointer(regs) ((regs)->eip) extern unsigned long profile_pc(struct pt_regs *regs); #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/scatterlist.h000066400000000000000000000011301314037446600276450ustar00rootroot00000000000000#ifndef _I386_SCATTERLIST_H #define _I386_SCATTERLIST_H struct scatterlist { struct page *page; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; }; /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg * returns. */ #define sg_dma_address(sg) ((sg)->dma_address) #define sg_dma_len(sg) ((sg)->dma_length) #define ISA_DMA_THRESHOLD (0x00ffffff) #endif /* !(_I386_SCATTERLIST_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/segment.h000066400000000000000000000065351314037446600267640ustar00rootroot00000000000000#ifndef _ASM_SEGMENT_H #define _ASM_SEGMENT_H /* * The layout of the per-CPU GDT under Linux: * * 0 - null * 1 - reserved * 2 - reserved * 3 - reserved * * 4 - unused <==== new cacheline * 5 - unused * * ------- start of TLS (Thread-Local Storage) segments: * * 6 - TLS segment #1 [ glibc's TLS segment ] * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] * 8 - TLS segment #3 * 9 - reserved * 10 - reserved * 11 - reserved * * ------- start of kernel segments: * * 12 - kernel code segment <==== new cacheline * 13 - kernel data segment * 14 - default user CS * 15 - default user DS * 16 - TSS * 17 - LDT * 18 - PNPBIOS support (16->32 gate) * 19 - PNPBIOS support * 20 - PNPBIOS support * 21 - PNPBIOS support * 22 - PNPBIOS support * 23 - APM BIOS support * 24 - APM BIOS support * 25 - APM BIOS support * * 26 - ESPFIX small SS * 27 - unused * 28 - unused * 29 - unused * 30 - unused * 31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_ENTRIES 3 #define GDT_ENTRY_TLS_MIN 6 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #define GDT_ENTRY_DEFAULT_USER_CS 14 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) #define GDT_ENTRY_DEFAULT_USER_DS 15 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) #define GDT_ENTRY_KERNEL_BASE 12 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) #define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) ) #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) #define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) ) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* * The GDT has 32 entries */ #define GDT_ENTRIES 32 #define GDT_SIZE (GDT_ENTRIES * 8) /* Simple and small GDT entries for booting only */ #define GDT_ENTRY_BOOT_CS 2 #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS 
+ 1) #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) /* The PnP BIOS entries in the GDT */ #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) #define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) /* The PnP BIOS selectors */ #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number * of tasks we can have.. */ #define IDT_ENTRIES 256 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/setup.h000066400000000000000000000047661314037446600264660ustar00rootroot00000000000000/* * Just a place holder. We don't want to have to test x86 before * we include stuff */ #ifndef _i386_SETUP_H #define _i386_SETUP_H #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT) /* * Reserved space for vmalloc and iomap - defined in asm/page.h */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) #define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 2048 #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F #define OLD_CL_BASE_ADDR 0x90000 #define OLD_CL_OFFSET 0x90022 #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #ifndef __ASSEMBLY__ /* * This is set up by the setup-routine at boot-time */ extern unsigned char boot_params[PARAM_SIZE]; #define PARAM (boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define IST_INFO (*(struct ist_info *) (PARAM+0x60)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) #define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0))) #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (__pa(xen_start_info->mod_start)) #define INITRD_SIZE (xen_start_info->mod_len) #define EDID_INFO (*(struct edid_info *) (PARAM+0x440)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info 
*) (PARAM+EDDBUF)) #endif /* __ASSEMBLY__ */ #endif /* _i386_SETUP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/smp.h000066400000000000000000000045211314037446600261120ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #endif #endif #define BAD_APICID 0xFFu #ifdef CONFIG_SMP #ifndef __ASSEMBLY__ /* * Private routines/data */ extern void smp_alloc_memory(void); extern int pic_mode; extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); #define MAX_APICID 256 extern u8 x86_cpu_to_apicid[]; #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); #endif /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ #define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_possible_map; #define cpu_callin_map cpu_possible_map /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { return cpus_weight(cpu_possible_map); } #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) #ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); #else #include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } #endif static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); #endif /* !__ASSEMBLY__ */ #else /* CONFIG_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/spinlock.h000066400000000000000000000111711314037446600271340ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include #include #include #include #include /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * * We make no fairness assumptions. They have a cost. 
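 *
 * Illustrative use only: the __raw_* helpers below are wrapped by the
 * generic spinlock layer, so ordinary code never calls them directly but
 * does, for example,
 *
 *   static DEFINE_SPINLOCK(my_lock);
 *   ...
 *   spin_lock(&my_lock);
 *   ... critical section ...
 *   spin_unlock(&my_lock);
 *
 * (my_lock is a made-up name; DEFINE_SPINLOCK/spin_lock come from
 * linux/spinlock.h).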
* * (the type definitions are in asm/spinlock_types.h) */ #define __raw_spin_is_locked(x) \ (*(volatile signed char *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ LOCK "decb %0\n\t" \ "jns 3f\n" \ "2:\t" \ "rep;nop\n\t" \ "cmpb $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ "3:\n\t" #define __raw_spin_lock_string_flags \ "\n1:\t" \ LOCK "decb %0\n\t" \ "jns 4f\n\t" \ "2:\t" \ "testl $0x200, %1\n\t" \ "jz 3f\n\t" \ "#sti\n\t" \ "3:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jle 3b\n\t" \ "#cli\n\t" \ "jmp 1b\n" \ "4:\n\t" static inline void __raw_spin_lock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_lock_string :"+m" (lock->slock) : : "memory"); } static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { __asm__ __volatile__( __raw_spin_lock_string_flags :"+m" (lock->slock) : "r" (flags) : "memory"); } static inline int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval; #ifdef CONFIG_SMP_ALTERNATIVES __asm__ __volatile__( "1:movb %1,%b0\n" "movb $0,%1\n" "2:" ".section __smp_alternatives,\"a\"\n" ".long 1b\n" ".long 3f\n" ".previous\n" ".section __smp_replacements,\"a\"\n" "3: .byte 2b - 1b\n" ".byte 5f-4f\n" ".byte 0\n" ".byte 6f-5f\n" ".byte -1\n" "4: xchgb %b0,%1\n" "5: movb %1,%b0\n" "movb $0,%1\n" "6:\n" ".previous\n" :"=q" (oldval), "+m" (lock->slock) :"0" (0) : "memory"); #else __asm__ __volatile__( "xchgb %b0,%1" :"=q" (oldval), "+m" (lock->slock) :"0" (0) : "memory"); #endif return oldval > 0; } /* * __raw_spin_unlock based on writing $1 to the low byte. * This method works. Despite all the confusion. * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) * (PPro errata 66, 92) */ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) #define __raw_spin_unlock_string \ "movb $1,%0" \ :"+m" (lock->slock) : : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_unlock_string ); } #else #define __raw_spin_unlock_string \ "xchgb %b0, %1" \ :"=q" (oldval), "+m" (lock->slock) \ :"0" (oldval) : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { char oldval = 1; __asm__ __volatile__( __raw_spin_unlock_string ); } #endif #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) /* * Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. * * The inline assembly is non-obvious. Think about it. * * Changed to use the same technique as rw semaphores. See * semaphore.h for details. -ben * * the helpers are in arch/i386/kernel/semaphore.c */ /** * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ #define __raw_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) static inline void __raw_read_lock(raw_rwlock_t *rw) { __build_read_lock(rw, "__read_lock_failed"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { __build_write_lock(rw, "__write_lock_failed"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); if (atomic_read(count) >= 0) return 1; atomic_inc(count); return 0; } static inline int __raw_write_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) return 1; atomic_add(RW_LOCK_BIAS, count); return 0; } static inline void __raw_read_unlock(raw_rwlock_t *rw) { asm volatile(LOCK "incl %0" :"+m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0" : "+m" (rw->lock) : : "memory"); } #endif /* __ASM_SPINLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/swiotlb.h000066400000000000000000000031251314037446600267750ustar00rootroot00000000000000#ifndef _ASM_SWIOTLB_H #define _ASM_SWIOTLB_H 1 /* SWIOTLB interface */ extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); #ifdef CONFIG_HIGHMEM extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction); #endif extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); #ifdef CONFIG_SWIOTLB extern int swiotlb; #else #define swiotlb 0 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/synch_bitops.h000066400000000000000000000072471314037446600300270ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
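 *
 * Illustrative use only: unlike the plain bitops, which may drop the lock
 * prefix on UP builds, these always emit it, so code touching memory that
 * the hypervisor or another guest also writes can do, for example,
 *
 *   synch_set_bit(port, &shared_info->evtchn_mask[0]);
 *
 * and rely on the update being atomic across (virtual) CPUs
 * (shared_info/evtchn_mask are named here purely for illustration).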
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } static __always_inline int synch_const_test_bit(int nr, const volatile void * addr) { return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; } static __inline__ int synch_var_test_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "btl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) ); return oldbit; } #define synch_test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ synch_const_test_bit((nr),(addr)) : \ synch_var_test_bit((nr),(addr))) #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/system.h000066400000000000000000000500361314037446600266410ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #include #include #include #include #ifdef __KERNEL__ #define AT_VECTOR_SIZE_ARCH 2 struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); /* * Saving eflags is important. 
It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. */ #define switch_to(prev,next,last) do { \ unsigned long esi,edi; \ asm volatile("pushfl\n\t" /* Save flags */ \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ "movl %5,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ "pushl %6\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popfl" \ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ "=a" (last),"=S" (esi),"=D" (edi) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ "2" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %%dl,%2\n\t" \ "movb %%dh,%3" \ :"=&d" (__pr) \ :"m" (*((addr)+2)), \ "m" (*((addr)+4)), \ "m" (*((addr)+7)), \ "0" (base) \ ); } while(0) #define _set_limit(addr,limit) do { unsigned long __lr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %2,%%dh\n\t" \ "andb $0xf0,%%dh\n\t" \ "orb %%dh,%%dl\n\t" \ "movb %%dl,%2" \ :"=&d" (__lr) \ :"m" (*(addr)), \ "m" (*((addr)+6)), \ "0" (limit) \ ); } while(0) #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "mov %0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "pushl $0\n\t" \ "popl %%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 4\n\t" \ ".long 1b,3b\n" \ ".previous" \ : :"rm" (value)) /* * Save a segment register away */ #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) #define read_cr0() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ "movl %%cr0,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define write_cr0(x) \ __asm__ __volatile__("movl %0,%%cr0": :"r" (x)) #define read_cr2() (current_vcpu_info()->arch.cr2) #define write_cr2(x) \ __asm__ __volatile__("movl %0,%%cr2": :"r" (x)) #define read_cr3() ({ \ unsigned int __dummy; \ __asm__ ( \ "movl %%cr3,%0\n\t" \ :"=r" (__dummy)); \ __dummy = xen_cr3_to_pfn(__dummy); \ mfn_to_pfn(__dummy) << PAGE_SHIFT; \ }) #define write_cr3(x) ({ \ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \ __dummy = xen_pfn_to_cr3(__dummy); \ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \ }) #define read_cr4() ({ \ unsigned int __dummy; \ __asm__( \ "movl %%cr4,%0\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) #define read_cr4_safe() ({ \ unsigned int __dummy; \ /* This could fault if %cr4 does not exist */ \ __asm__("1: movl %%cr4, %0 \n" \ "2: \n" \ ".section __ex_table,\"a\" \n" \ ".long 1b,2b \n" \ ".previous \n" \ : "=r" (__dummy): "0" (0)); \ __dummy; \ }) #define write_cr4(x) \ __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) /* * Clear and set 'TS' bit respectively */ #define clts() (HYPERVISOR_fpu_taskswitch(0)) #define stts() (HYPERVISOR_fpu_taskswitch(1)) #endif /* __KERNEL__ */ #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory") static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; __asm__("lsll %1,%0" :"=r" (__limit):"r" (segment)); return __limit+1; } #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) struct __xchg_dummy { unsigned long a[100]; 
}; #define __xg(x) ((struct __xchg_dummy *)(x)) #ifdef CONFIG_X86_CMPXCHG64 /* * The semantics of XCHGCMP8B are a bit strange, this is why * there is a loop and the loading of %%eax and %%edx has to * be inside. This inlines well in most cases, the cached * cost is around ~38 cycles. (in the future we might want * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that * might have an implicit FPU-save as a cost, so it's not * clear which path to go.) * * cmpxchg8b must be used with the lock prefix here to allow * the instruction to be executed atomically, see page 3-102 * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. */ static inline void __set_64bit (unsigned long long * ptr, unsigned int low, unsigned int high) { __asm__ __volatile__ ( "\n1:\t" "movl (%0), %%eax\n\t" "movl 4(%0), %%edx\n\t" "lock cmpxchg8b (%0)\n\t" "jnz 1b" : /* no outputs */ : "D"(ptr), "b"(low), "c"(high) : "ax","dx","memory"); } static inline void __set_64bit_constant (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); } #define ll_low(x) *(((unsigned int*)&(x))+0) #define ll_high(x) *(((unsigned int*)&(x))+1) static inline void __set_64bit_var (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,ll_low(value), ll_high(value)); } #define set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit_constant(ptr, value) : \ __set_64bit_var(ptr, value) ) #define _set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ __set_64bit(ptr, ll_low(value), ll_high(value)) ) #endif /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK "cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to * simulate the cmpxchg on the 80386 CPU. For that purpose we define * a function for each of the sizes we support. 
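 *
 * Illustrative use only: whichever path is taken (the hardware instruction
 * or the 80386 emulation), callers follow the contract documented above
 * and retry until the compare succeeds, roughly
 *
 *   old = *p;
 *   for (;;) {
 *           prev = cmpxchg(p, old, old + 1);
 *           if (prev == old)
 *                   break;
 *           old = prev;
 *   }
 *
 * (sketch of a lock-free increment; real code would normally use the
 * atomic_t helpers instead).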
*/ extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, unsigned long new, int size) { switch (size) { case 1: return cmpxchg_386_u8(ptr, old, new); case 2: return cmpxchg_386_u16(ptr, old, new); case 4: return cmpxchg_386_u32(ptr, old, new); } return old; } #define cmpxchg(ptr,o,n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ __ret = __cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ else \ __ret = cmpxchg_386((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ __ret; \ }) #endif #ifdef CONFIG_X86_CMPXCHG64 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, unsigned long long new) { unsigned long long prev; __asm__ __volatile__(LOCK "cmpxchg8b %3" : "=A"(prev) : "b"((unsigned long)new), "c"((unsigned long)(new >> 32)), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } #define cmpxchg64(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ (unsigned long long)(n))) #endif #ifdef __KERNEL__ struct alt_instr { __u8 *instr; /* original instruction */ __u8 *replacement; __u8 cpuid; /* cpuid bit set for replacement */ __u8 instrlen; /* length of original instruction */ __u8 replacementlen; /* length of new instruction, <= instrlen */ __u8 pad; }; #endif /* * Alternative instructions for different CPU types or capabilities. * * This allows to use optimized instructions even on generic binary * kernels. * * length of oldinstr must be longer or equal the length of newinstr * It can be padded with nops as needed. * * For non barrier like inlines please define new variants * without volatile and memory clobber. */ #define alternative(oldinstr, newinstr, feature) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. * * Pecularities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... "r") * If you use variable sized constraints like "m" or "g" in the * replacement maake sure to pad to the worst case length. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature), ##input) /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. * * For now, "wmb()" doesn't actually do anything, as all * Intel CPU's follow what Intel calls a *Processor Order*, * in which all writes are seen in the program order even * outside the CPU. 
* * I expect future Intel CPU's to have a weaker ordering, * but I'd also expect them to finally get their act together * and add some real memory barriers if so. * * Some non intel clones support out of order store. wmb() ceases to be a * nop for these. */ /* * Actually only lfence would be needed for mb() because all stores done * by the kernel should be already ordered. But keep a full barrier for now. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) /** * read_barrier_depends - Flush all pending reads that subsequents reads * depend on. * * No data-dependent reads from memory-like regions are ever reordered * over this barrier. All reads preceding this primitive are guaranteed * to access memory (but not necessarily other CPUs' caches) before any * reads following this primitive that depend on the data return by * any of the preceding reads. This primitive is much lighter weight than * rmb() on most CPUs, and is never heavier weight than is * rmb(). * * These ordering constraints are respected by both the local CPU * and the compiler. * * Ordering is not guaranteed by anything other than these primitives, * not even by data dependencies. See the documentation for * memory_barrier() for examples and URLs to more information. * * For example, the following code would force ordering (the initial * value of "a" is zero, "b" is one, and "p" is "&a"): * * * CPU 0 CPU 1 * * b = 2; * memory_barrier(); * p = &b; q = p; * read_barrier_depends(); * d = *q; * * * because the read of "*q" depends on the read of "p" and these * two reads are separated by a read_barrier_depends(). However, * the following code, with the same initial values for "a" and "b": * * * CPU 0 CPU 1 * * a = 2; * memory_barrier(); * b = 3; y = b; * read_barrier_depends(); * x = a; * * * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() * in cases like this where there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) #ifdef CONFIG_X86_OOSTORE /* Actually there are no OOO store capable CPUs for now that do SSE, but make it already an possibility. 
*/ #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) #else #define wmb() __asm__ __volatile__ ("": : :"memory") #endif #ifdef CONFIG_SMP #define smp_wmb() wmb() #if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE) #define smp_alt_mb(instr) \ __asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \ ".section __smp_alternatives,\"a\"\n" \ ".long 6667b\n" \ ".long 6673f\n" \ ".previous\n" \ ".section __smp_replacements,\"a\"\n" \ "6673:.byte 6668b-6667b\n" \ ".byte 6670f-6669f\n" \ ".byte 6671f-6670f\n" \ ".byte 0\n" \ ".byte %c0\n" \ "6669:lock;addl $0,0(%%esp)\n" \ "6670:" instr "\n" \ "6671:\n" \ ".previous\n" \ : \ : "i" (X86_FEATURE_XMM2) \ : "memory") #define smp_rmb() smp_alt_mb("lfence") #define smp_mb() smp_alt_mb("mfence") #define set_mb(var, value) do { \ unsigned long __set_mb_temp; \ __asm__ __volatile__("6667:movl %1, %0\n6668:\n" \ ".section __smp_alternatives,\"a\"\n" \ ".long 6667b\n" \ ".long 6673f\n" \ ".previous\n" \ ".section __smp_replacements,\"a\"\n" \ "6673: .byte 6668b-6667b\n" \ ".byte 6670f-6669f\n" \ ".byte 0\n" \ ".byte 6671f-6670f\n" \ ".byte -1\n" \ "6669: xchg %1, %0\n" \ "6670:movl %1, %0\n" \ "6671:\n" \ ".previous\n" \ : "=m" (var), "=r" (__set_mb_temp) \ : "1" (value) \ : "memory"); } while (0) #else #define smp_rmb() rmb() #define smp_mb() mb() #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #endif #define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif #define set_wmb(var, value) do { var = value; wmb(); } while (0) /* interrupt control.. */ /* * The use of 'barrier' in the following reflects their use as local-lock * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following * critical operations are executed. All critical operations must complete * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also * includes these barriers, for example. 
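 */
/*
 * Illustrative example (not from the upstream source): a short critical
 * section bracketed by the local_irq_save()/local_irq_restore() wrappers
 * defined below, which on Xen mask and unmask event-channel upcalls rather
 * than toggling EFLAGS.IF.  Names are hypothetical; example only.
 */
#if 0
static unsigned int example_event_count;

static void example_count_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask upcalls, remember prior state */
	example_event_count++;		/* cannot be interleaved with an upcall */
	local_irq_restore(flags);	/* unmask and replay anything pending */
}
#endif
/*
 * (original header text resumes below)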
*/ #define __cli() \ do { \ preempt_disable(); \ current_vcpu_info()->evtchn_upcall_mask = 1; \ preempt_enable_no_resched(); \ barrier(); \ } while (0) #define __sti() \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ _vcpu->evtchn_upcall_mask = 0; \ barrier(); /* unmask then check (avoid races) */ \ if (unlikely(_vcpu->evtchn_upcall_pending)) \ force_evtchn_callback(); \ preempt_enable(); \ } while (0) #define __save_flags(x) \ do { \ preempt_disable(); \ (x) = current_vcpu_info()->evtchn_upcall_mask; \ preempt_enable(); \ } while (0) #define __restore_flags(x) \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \ barrier(); /* unmask then check (avoid races) */ \ if (unlikely(_vcpu->evtchn_upcall_pending)) \ force_evtchn_callback(); \ preempt_enable(); \ } else \ preempt_enable_no_resched(); \ } while (0) void safe_halt(void); void halt(void); #define __save_and_cli(x) \ do { \ vcpu_info_t *_vcpu; \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ (x) = _vcpu->evtchn_upcall_mask; \ _vcpu->evtchn_upcall_mask = 1; \ preempt_enable_no_resched(); \ barrier(); \ } while (0) #define local_irq_save(x) __save_and_cli(x) #define local_irq_restore(x) __restore_flags(x) #define local_save_flags(x) __save_flags(x) #define local_irq_disable() __cli() #define local_irq_enable() __sti() /* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */ #define irqs_disabled() \ ({ int ___x; \ preempt_disable(); \ ___x = (current_vcpu_info()->evtchn_upcall_mask != 0); \ preempt_enable_no_resched(); \ ___x; }) /* * disable hlt during certain critical i/o operations */ #define HAVE_DISABLE_HLT void disable_hlt(void); void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible: */ static inline void sched_cacheflush(void) { wbinvd(); } extern unsigned long arch_align_stack(unsigned long sp); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/tlbflush.h000066400000000000000000000047751314037446600271510ustar00rootroot00000000000000#ifndef _I386_TLBFLUSH_H #define _I386_TLBFLUSH_H #include #include #define __flush_tlb() xen_tlb_flush() #define __flush_tlb_global() xen_tlb_flush() #define __flush_tlb_all() xen_tlb_flush() extern unsigned long pgkern_mask; #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #define __flush_tlb_single(addr) xen_invlpg(addr) #define __flush_tlb_one(addr) __flush_tlb_single(addr) /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. 
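 */
/*
 * Illustrative example (not from the upstream source): picking between the
 * per-page and ranged flush primitives listed above after editing user page
 * tables.  The helper name and arguments are hypothetical; example only.
 */
#if 0
static void example_flush_after_update(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		flush_tlb_page(vma, start);		/* a single page changed */
	else
		flush_tlb_range(vma, start, end);	/* a larger span changed */
}
#endif
/*
 * (original header text resumes below)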
*/ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() #define flush_tlb_all xen_tlb_flush_all #define flush_tlb_current_task() xen_tlb_flush_mask(¤t->mm->cpu_vm_mask) #define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask) #define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va) #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; }; DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* i386 does not keep any page table caches in TLB */ } #endif /* _I386_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/vga.h000066400000000000000000000005741314037446600260740ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/asm/xenoprof.h000066400000000000000000000034651314037446600271610ustar00rootroot00000000000000/****************************************************************************** * asm-i386/mach-xen/asm/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __ASM_XENOPROF_H__ #define __ASM_XENOPROF_H__ #ifdef CONFIG_XEN struct super_block; struct dentry; int xenoprof_create_files(struct super_block * sb, struct dentry * root); #define HAVE_XENOPROF_CREATE_FILES struct xenoprof_init; void xenoprof_arch_init_counter(struct xenoprof_init *init); void xenoprof_arch_counter(void); void xenoprof_arch_start(void); void xenoprof_arch_stop(void); struct xenoprof_arch_shared_buffer { /* nothing */ }; struct xenoprof_shared_buffer; void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf); struct xenoprof_get_buffer; int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf); struct xenoprof_passive; int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf); #endif /* CONFIG_XEN */ #endif /* __ASM_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/irq_vectors.h000066400000000000000000000067111314037446600270760ustar00rootroot00000000000000/* * This file should contain #defines for all of the interrupt vector * numbers used by this architecture. * * In addition, there are some standard defines: * * FIRST_EXTERNAL_VECTOR: * The first free place for external interrupts * * SYSCALL_VECTOR: * The IRQ vector a syscall makes the user to kernel transition * under. * * TIMER_IRQ: * The IRQ number the timer interrupt comes in at. * * NR_IRQS: * The total number of interrupt vectors (including all the * architecture specific interrupts) needed. * */ #ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ #if 0 /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. * * Vectors 0xf0-0xfa are free (reserved for future Linux use). */ #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xf0 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef #endif #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef /* * 16 8259A IRQ's, 208 potential APIC interrupt sources. * Right now the APIC is mostly only used for SMP. * 256 vectors is an architectural limit. 
(we can have * more than 256 devices theoretically, but they will * have to use shared interrupts) * Since vectors 0x00-0x1f are used/reserved for the CPU, * the usable vector space is 0x20-0xff (224 vectors) */ #define RESCHEDULE_VECTOR 0 #define CALL_FUNCTION_VECTOR 1 #define NR_IPIS 2 /* * The maximum number of vectors supported by i386 processors * is limited to 256. For processors other than i386, NR_VECTORS * should be changed accordingly. */ #define NR_VECTORS 256 #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) /* * The flat IRQ space is divided into two regions: * 1. A one-to-one mapping of real physical IRQs. This space is only used * if we have physical device-access privilege. This region is at the * start of the IRQ space so that existing device drivers do not need * to be modified to translate physical IRQ numbers into our IRQ space. * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These * are bound using the provided bind/unbind functions. */ #define PIRQ_BASE 0 #define NR_PIRQS 256 #define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS) #define NR_DYNIRQS 256 #define NR_IRQS (NR_PIRQS + NR_DYNIRQS) #define NR_IRQ_VECTORS NR_IRQS #define pirq_to_irq(_x) ((_x) + PIRQ_BASE) #define irq_to_pirq(_x) ((_x) - PIRQ_BASE) #define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE) #define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE) #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/mach_apic.h000066400000000000000000000014451314037446600264410ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H #include #include static inline cpumask_t target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_map; #else return cpumask_of_cpu(0); #endif } #define TARGET_CPUS (target_cpus()) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ static inline void clustered_apic_check(void) { } static inline int multi_timer_check(int apic, int irq) { return 0; } static inline int apicid_to_node(int logical_apicid) { return 0; } static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return cpus_addr(cpumask)[0]; } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/mach_traps.h000066400000000000000000000014221314037446600266510ustar00rootroot00000000000000/* * include/asm-xen/asm-i386/mach-xen/mach_traps.h * * Machine specific NMI handling for Xen */ #ifndef _MACH_TRAPS_H #define _MACH_TRAPS_H #include #include static inline void clear_mem_error(unsigned char reason) {} static inline void clear_io_check_error(unsigned char reason) {} static inline unsigned char get_nmi_reason(void) { shared_info_t *s = HYPERVISOR_shared_info; unsigned char reason = 0; /* construct a value which looks like it came from * port 0x61. */ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason)) reason |= 0x40; if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason)) reason |= 0x80; return reason; } static inline void reassert_nmi(void) {} #endif /* !_MACH_TRAPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/setup_arch_post.h000066400000000000000000000017551314037446600277430ustar00rootroot00000000000000/** * machine_specific_memory_setup - Hook for machine specific memory setup. 
* * Description: * This is included late in kernel/setup.c so that it can make * use of all of the static functions. **/ static char * __init machine_specific_memory_setup(void) { int rc; struct xen_memory_map memmap; /* * This is rather large for a stack variable but this early in * the boot process we know we have plenty slack space. */ struct e820entry map[E820MAX]; memmap.nr_entries = E820MAX; set_xen_guest_handle(memmap.buffer, map); rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); if ( rc == -ENOSYS ) { memmap.nr_entries = 1; map[0].addr = 0ULL; map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages); /* 8MB slack (to balance backend allocations). */ map[0].size += 8ULL << 20; map[0].type = E820_RAM; rc = 0; } BUG_ON(rc); sanitize_e820_map(map, (char *)&memmap.nr_entries); BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0); return "Xen"; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mach-xen/setup_arch_pre.h000066400000000000000000000002141314037446600275310ustar00rootroot00000000000000/* Hook to call BIOS initialisation function */ #define ARCH_SETUP machine_specific_arch_setup(); void machine_specific_arch_setup(void); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/math_emu.h000066400000000000000000000013631314037446600246330ustar00rootroot00000000000000#ifndef _I386_MATH_EMU_H #define _I386_MATH_EMU_H #include int restore_i387_soft(void *s387, struct _fpstate __user *buf); int save_i387_soft(void *s387, struct _fpstate __user *buf); /* This structure matches the layout of the data saved to the stack following a device-not-present interrupt, part of it saved automatically by the 80386/80486. */ struct info { long ___orig_eip; long ___ebx; long ___ecx; long ___edx; long ___esi; long ___edi; long ___ebp; long ___eax; long ___ds; long ___es; long ___orig_eax; long ___eip; long ___cs; long ___eflags; long ___esp; long ___ss; long ___vm86_es; /* This and the following only in vm86 mode */ long ___vm86_ds; long ___vm86_fs; long ___vm86_gs; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mc146818rtc.h000066400000000000000000000051031314037446600246340ustar00rootroot00000000000000/* * Machine dependent access functions for RTC registers. */ #ifndef _ASM_MC146818RTC_H #define _ASM_MC146818RTC_H #include #include #include #ifndef RTC_PORT #define RTC_PORT(x) (0x70 + (x)) #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ #endif #ifdef __HAVE_ARCH_CMPXCHG /* * This lock provides nmi access to the CMOS/RTC registers. It has some * special properties. It is owned by a CPU and stores the index register * currently being accessed (if owned). The idea here is that it works * like a normal lock (normally). However, in an NMI, the NMI code will * first check to see if its CPU owns the lock, meaning that the NMI * interrupted during the read/write of the device. If it does, it goes ahead * and performs the access and then restores the index register. If it does * not, it locks normally. * * Note that since we are working with NMIs, we need this lock even in * a non-SMP machine just to mark that the lock is owned. * * This only works with compare-and-swap. There is no other way to * atomically claim the lock and set the owner. */ #include extern volatile unsigned long cmos_lock; /* * All of these below must be called with interrupts off, preempt * disabled, etc. 
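 */
/*
 * Illustrative example (not from the upstream source): reading one CMOS
 * register under the NMI-safe locking scheme described above.  Register 0x00
 * (the seconds byte on MC146818-compatible RTCs) is used purely for
 * illustration; the block is an example only.
 */
#if 0
static unsigned char example_read_rtc_seconds(void)
{
	unsigned char val;

	lock_cmos_prefix(0x00);		/* irqs off, cmos_lock taken */
	val = CMOS_READ(0x00);
	lock_cmos_suffix(0x00);		/* lock dropped, irqs restored */

	return val;
}
#endif
/*
 * (original header text resumes below)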
*/ static inline void lock_cmos(unsigned char reg) { unsigned long new; new = ((smp_processor_id()+1) << 8) | reg; for (;;) { if (cmos_lock) continue; if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) return; } } static inline void unlock_cmos(void) { cmos_lock = 0; } static inline int do_i_have_lock_cmos(void) { return (cmos_lock >> 8) == (smp_processor_id()+1); } static inline unsigned char current_lock_cmos_reg(void) { return cmos_lock & 0xff; } #define lock_cmos_prefix(reg) \ do { \ unsigned long cmos_flags; \ local_irq_save(cmos_flags); \ lock_cmos(reg) #define lock_cmos_suffix(reg) \ unlock_cmos(); \ local_irq_restore(cmos_flags); \ } while (0) #else #define lock_cmos_prefix(reg) do {} while (0) #define lock_cmos_suffix(reg) do {} while (0) #define lock_cmos(reg) #define unlock_cmos() #define do_i_have_lock_cmos() 0 #define current_lock_cmos_reg() 0 #endif /* * The yet supported machines all access the RTC index register via * an ISA port access but the way to access the date register differs ... */ #define CMOS_READ(addr) rtc_cmos_read(addr) #define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) unsigned char rtc_cmos_read(unsigned char addr); void rtc_cmos_write(unsigned char val, unsigned char addr); #define RTC_IRQ 8 #endif /* _ASM_MC146818RTC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mca.h000066400000000000000000000023221314037446600235700ustar00rootroot00000000000000/* -*- mode: c; c-basic-offset: 8 -*- */ /* Platform specific MCA defines */ #ifndef _ASM_MCA_H #define _ASM_MCA_H /* Maximal number of MCA slots - actually, some machines have less, but * they all have sufficient number of POS registers to cover 8. */ #define MCA_MAX_SLOT_NR 8 /* Most machines have only one MCA bus. The only multiple bus machines * I know have at most two */ #define MAX_MCA_BUSSES 2 #define MCA_PRIMARY_BUS 0 #define MCA_SECONDARY_BUS 1 /* Dummy slot numbers on primary MCA for integrated functions */ #define MCA_INTEGSCSI (MCA_MAX_SLOT_NR) #define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1) #define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2) /* Dummy POS values for integrated functions */ #define MCA_DUMMY_POS_START 0x10000 #define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1) #define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2) #define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3) /* MCA registers */ #define MCA_MOTHERBOARD_SETUP_REG 0x94 #define MCA_ADAPTER_SETUP_REG 0x96 #define MCA_POS_REG(n) (0x100+(n)) #define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */ /* Max number of adapters, including both slots and various integrated * things. */ #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mca_dma.h000066400000000000000000000121571314037446600244200ustar00rootroot00000000000000#ifndef MCA_DMA_H #define MCA_DMA_H #include #include /* * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to * standard PC dma, but it certainly has its quirks. DMA register addresses * are in a different place and there are some added functions. Most of this * should be pretty obvious on inspection. Note that the user must divide * count by 2 when using 16-bit dma; that is not handled by these functions. * * Ramen Noodles are yummy. 
* * 1998 Tymm Twillman */ /* * Registers that are used by the DMA controller; FN is the function register * (tell the controller what to do) and EXE is the execution register (how * to do it) */ #define MCA_DMA_REG_FN 0x18 #define MCA_DMA_REG_EXE 0x1A /* * Functions that the DMA controller can do */ #define MCA_DMA_FN_SET_IO 0x00 #define MCA_DMA_FN_SET_ADDR 0x20 #define MCA_DMA_FN_GET_ADDR 0x30 #define MCA_DMA_FN_SET_COUNT 0x40 #define MCA_DMA_FN_GET_COUNT 0x50 #define MCA_DMA_FN_GET_STATUS 0x60 #define MCA_DMA_FN_SET_MODE 0x70 #define MCA_DMA_FN_SET_ARBUS 0x80 #define MCA_DMA_FN_MASK 0x90 #define MCA_DMA_FN_RESET_MASK 0xA0 #define MCA_DMA_FN_MASTER_CLEAR 0xD0 /* * Modes (used by setting MCA_DMA_FN_MODE in the function register) * * Note that the MODE_READ is read from memory (write to device), and * MODE_WRITE is vice-versa. */ #define MCA_DMA_MODE_XFER 0x04 /* read by default */ #define MCA_DMA_MODE_READ 0x04 /* same as XFER */ #define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */ #define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */ #define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */ /** * mca_enable_dma - channel to enable DMA on * @dmanr: DMA channel * * Enable the MCA bus DMA on a channel. This can be called from * IRQ context. */ static __inline__ void mca_enable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); } /** * mca_disble_dma - channel to disable DMA on * @dmanr: DMA channel * * Enable the MCA bus DMA on a channel. This can be called from * IRQ context. */ static __inline__ void mca_disable_dma(unsigned int dmanr) { outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); } /** * mca_set_dma_addr - load a 24bit DMA address * @dmanr: DMA channel * @a: 24bit bus address * * Load the address register in the DMA controller. This has a 24bit * limitation (16Mb). */ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) { outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); outb(a & 0xff, MCA_DMA_REG_EXE); outb((a >> 8) & 0xff, MCA_DMA_REG_EXE); outb((a >> 16) & 0xff, MCA_DMA_REG_EXE); } /** * mca_get_dma_addr - load a 24bit DMA address * @dmanr: DMA channel * * Read the address register in the DMA controller. This has a 24bit * limitation (16Mb). The return is a bus address. */ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) { unsigned int addr; outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); addr = inb(MCA_DMA_REG_EXE); addr |= inb(MCA_DMA_REG_EXE) << 8; addr |= inb(MCA_DMA_REG_EXE) << 16; return addr; } /** * mca_set_dma_count - load a 16bit transfer count * @dmanr: DMA channel * @count: count * * Set the DMA count for this channel. This can be up to 64Kbytes. * Setting a count of zero will not do what you expect. */ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) { count--; /* transfers one more than count -- correct for this */ outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN); outb(count & 0xff, MCA_DMA_REG_EXE); outb((count >> 8) & 0xff, MCA_DMA_REG_EXE); } /** * mca_get_dma_residue - get the remaining bytes to transfer * @dmanr: DMA channel * * This function returns the number of bytes left to transfer * on this DMA channel. 
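 */
/*
 * Illustrative example (not from the upstream source): programming a 16-bit
 * read transfer with the helpers in this file.  Note the rule from the top
 * of the file: in 16-bit mode the count is in words, so the byte length is
 * divided by two.  virt_to_bus() and all names here are used purely for
 * illustration; example only.
 */
#if 0
static void example_start_mca_dma(unsigned int dmanr, void *buf, unsigned int len)
{
	mca_disable_dma(dmanr);				/* quiesce the channel */
	mca_set_dma_addr(dmanr, virt_to_bus(buf));	/* 24-bit bus address */
	mca_set_dma_count(dmanr, len / 2);		/* words, not bytes */
	mca_set_dma_mode(dmanr, MCA_DMA_MODE_READ | MCA_DMA_MODE_16);
	mca_enable_dma(dmanr);				/* start the transfer */
}
#endif
/*
 * (original header text resumes below)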
*/ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) { unsigned short count; outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN); count = 1 + inb(MCA_DMA_REG_EXE); count += inb(MCA_DMA_REG_EXE) << 8; return count; } /** * mca_set_dma_io - set the port for an I/O transfer * @dmanr: DMA channel * @io_addr: an I/O port number * * Unlike the ISA bus DMA controllers the DMA on MCA bus can transfer * with an I/O port target. */ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) { /* * DMA from a port address -- set the io address */ outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); outb(io_addr & 0xff, MCA_DMA_REG_EXE); outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); } /** * mca_set_dma_mode - set the DMA mode * @dmanr: DMA channel * @mode: mode to set * * The DMA controller supports several modes. The mode values you can * set are : * * %MCA_DMA_MODE_READ when reading from the DMA device. * * %MCA_DMA_MODE_WRITE to writing to the DMA device. * * %MCA_DMA_MODE_IO to do DMA to or from an I/O port. * * %MCA_DMA_MODE_16 to do 16bit transfers. * */ static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) { outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); outb(mode, MCA_DMA_REG_EXE); } #endif /* MCA_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mman.h000066400000000000000000000011501314037446600237560ustar00rootroot00000000000000#ifndef __I386_MMAN_H__ #define __I386_MMAN_H__ #include #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ #endif /* __I386_MMAN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mmu.h000066400000000000000000000004551314037446600236330ustar00rootroot00000000000000#ifndef __i386_MMU_H #define __i386_MMU_H #include /* * The i386 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { int size; struct semaphore sem; void *ldt; } mm_context_t; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mmu_context.h000066400000000000000000000033121314037446600253720ustar00rootroot00000000000000#ifndef __I386_SCHED_H #define __I386_SCHED_H #include #include #include #include #include /* * Used for LDT copy/destruction. 
*/ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP unsigned cpu = smp_processor_id(); if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; #endif } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { int cpu = smp_processor_id(); if (likely(prev != next)) { /* stop flush ipis for the previous mm */ cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; per_cpu(cpu_tlbstate, cpu).active_mm = next; #endif cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables */ load_cr3(next->pgd); /* * load the LDT, if the LDT is different: */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context, cpu); } #ifdef CONFIG_SMP else { per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ load_cr3(next->pgd); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk, mm) \ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) #define activate_mm(prev, next) \ switch_mm((prev),(next),NULL) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mmx.h000066400000000000000000000004161314037446600236330ustar00rootroot00000000000000#ifndef _ASM_MMX_H #define _ASM_MMX_H /* * MMX 3Dnow! helper operations */ #include extern void *_mmx_memcpy(void *to, const void *from, size_t size); extern void mmx_clear_page(void *page); extern void mmx_copy_page(void *to, void *from); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mmzone.h000066400000000000000000000071661314037446600243500ustar00rootroot00000000000000/* * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 * */ #ifndef _ASM_MMZONE_H_ #define _ASM_MMZONE_H_ #include #ifdef CONFIG_NUMA extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) #ifdef CONFIG_X86_NUMAQ #include #else /* summit or generic arch */ #include #endif extern int get_memcfg_numa_flat(void ); /* * This allows any one NUMA architecture to be compiled * for, and still fall back to the flat function if it * fails. 
*/ static inline void get_memcfg_numa(void) { #ifdef CONFIG_X86_NUMAQ if (get_memcfg_numaq()) return; #elif defined(CONFIG_ACPI_SRAT) if (get_memcfg_from_srat()) return; #endif get_memcfg_numa_flat(); } extern int early_pfn_to_nid(unsigned long pfn); #else /* !CONFIG_NUMA */ #define get_memcfg_numa get_memcfg_numa_flat #define get_zholes_size(n) (0) #endif /* CONFIG_NUMA */ #ifdef CONFIG_DISCONTIGMEM /* * generic node memory support, the following assumptions apply: * * 1) memory comes in 256Mb contigious chunks which are either present or not * 2) we will not have more than 64Gb in total * * for now assume that 64Gb is max amount of RAM for whole system * 64Gb / 4096bytes/page = 16777216 pages */ #define MAX_NR_PAGES 16777216 #define MAX_ELEMENTS 256 #define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) extern s8 physnode_map[]; static inline int pfn_to_nid(unsigned long pfn) { #ifdef CONFIG_NUMA return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]); #else return 0; #endif } #define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) /* * Following are macros that each numa implmentation must define. */ #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) \ ({ \ pg_data_t *__pgdat = NODE_DATA(nid); \ __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ }) /* XXX: FIXME -- wli */ #define kern_addr_valid(kaddr) (0) #define pfn_to_page(pfn) \ ({ \ unsigned long __pfn = pfn; \ int __node = pfn_to_nid(__pfn); \ &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ }) #define page_to_pfn(pg) \ ({ \ struct page *__page = pg; \ struct zone *__zone = page_zone(__page); \ (unsigned long)(__page - __zone->zone_mem_map) \ + __zone->zone_start_pfn; \ }) #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ #define pfn_valid(pfn) ((pfn) < num_physpages) #else static inline int pfn_valid(int pfn) { int nid = pfn_to_nid(pfn); if (nid >= 0) return (pfn < node_end_pfn(nid)); return 0; } #endif /* CONFIG_X86_NUMAQ */ #endif /* CONFIG_DISCONTIGMEM */ #ifdef CONFIG_NEED_MULTIPLE_NODES /* * Following are macros that are specific to this numa platform. 
*/ #define reserve_bootmem(addr, size) \ reserve_bootmem_node(NODE_DATA(0), (addr), (size)) #define alloc_bootmem(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #define alloc_bootmem_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages_node(ignore, x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #endif /* CONFIG_NEED_MULTIPLE_NODES */ #endif /* _ASM_MMZONE_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/module.h000066400000000000000000000041021314037446600243130ustar00rootroot00000000000000#ifndef _ASM_I386_MODULE_H #define _ASM_I386_MODULE_H #include /* x86 is simple */ struct mod_arch_specific { }; #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define Elf_Ehdr Elf32_Ehdr #ifdef CONFIG_M386 #define MODULE_PROC_FAMILY "386 " #elif defined CONFIG_M486 #define MODULE_PROC_FAMILY "486 " #elif defined CONFIG_M586 #define MODULE_PROC_FAMILY "586 " #elif defined CONFIG_M586TSC #define MODULE_PROC_FAMILY "586TSC " #elif defined CONFIG_M586MMX #define MODULE_PROC_FAMILY "586MMX " #elif defined CONFIG_M686 #define MODULE_PROC_FAMILY "686 " #elif defined CONFIG_MPENTIUMII #define MODULE_PROC_FAMILY "PENTIUMII " #elif defined CONFIG_MPENTIUMIII #define MODULE_PROC_FAMILY "PENTIUMIII " #elif defined CONFIG_MPENTIUMM #define MODULE_PROC_FAMILY "PENTIUMM " #elif defined CONFIG_MPENTIUM4 #define MODULE_PROC_FAMILY "PENTIUM4 " #elif defined CONFIG_MK6 #define MODULE_PROC_FAMILY "K6 " #elif defined CONFIG_MK7 #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " #elif defined CONFIG_X86_ELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE #define MODULE_PROC_FAMILY "CRUSOE " #elif defined CONFIG_MEFFICEON #define MODULE_PROC_FAMILY "EFFICEON " #elif defined CONFIG_MWINCHIPC6 #define MODULE_PROC_FAMILY "WINCHIPC6 " #elif defined CONFIG_MWINCHIP2 #define MODULE_PROC_FAMILY "WINCHIP2 " #elif defined CONFIG_MWINCHIP3D #define MODULE_PROC_FAMILY "WINCHIP3D " #elif defined CONFIG_MCYRIXIII #define MODULE_PROC_FAMILY "CYRIXIII " #elif defined CONFIG_MVIAC3_2 #define MODULE_PROC_FAMILY "VIAC3-2 " #elif defined CONFIG_MGEODEGX1 #define MODULE_PROC_FAMILY "GEODEGX1 " #elif defined CONFIG_MGEODE_LX #define MODULE_PROC_FAMILY "GEODE " #else #error unknown processor family #endif #ifdef CONFIG_REGPARM #define MODULE_REGPARM "REGPARM " #else #define MODULE_REGPARM "" #endif #ifdef CONFIG_4KSTACKS #define MODULE_STACKSIZE "4KSTACKS " #else #define MODULE_STACKSIZE "" #endif #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE \ MODULE_SUBARCH_VERMAGIC #endif /* _ASM_I386_MODULE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mpspec.h000066400000000000000000000060301314037446600243170ustar00rootroot00000000000000#ifndef __ASM_MPSPEC_H #define __ASM_MPSPEC_H #include #include #include extern int mp_bus_id_to_type [MAX_MP_BUSSES]; extern int mp_bus_id_to_node [MAX_MP_BUSSES]; extern int mp_bus_id_to_local [MAX_MP_BUSSES]; extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; 
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned int def_to_bigsmp; extern unsigned int boot_cpu_physical_apicid; extern int smp_found_config; extern void find_smp_config (void); extern void get_smp_config (void); extern int nr_ioapics; extern int apic_version [MAX_APICS]; extern int mp_bus_id_to_type [MAX_MP_BUSSES]; extern int mp_irq_entries; extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; extern int mpc_default_type; extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned long mp_lapic_addr; extern int pic_mode; extern int using_apic_timer; #ifdef CONFIG_ACPI extern void mp_register_lapic (u8 id, u8 enabled); extern void mp_register_lapic_address (u64 address); extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs (void); extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); #endif /* CONFIG_ACPI */ #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; }; typedef struct physid_mask physid_mask_t; #define physid_set(physid, map) set_bit(physid, (map).mask) #define physid_clear(physid, map) clear_bit(physid, (map).mask) #define physid_isset(physid, map) test_bit(physid, (map).mask) #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) #define physids_coerce(map) ((map).mask[0]) #define physids_promote(physids) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ __physid_mask.mask[0] = physids; \ __physid_mask; \ }) #define physid_mask_of_physid(physid) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ physid_set(physid, __physid_mask); \ __physid_mask; \ }) #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } extern physid_mask_t phys_cpu_present_map; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mpspec_def.h000066400000000000000000000106041314037446600251370ustar00rootroot00000000000000#ifndef __ASM_MPSPEC_DEF_H #define __ASM_MPSPEC_DEF_H /* * Structure definitions for SMP machines following the * Intel Multiprocessing Specification 1.1 and 1.4. */ /* * This tag identifies where the SMP configuration * information is. */ #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') #define MAX_MPC_ENTRY 1024 #define MAX_APICS 256 struct intel_mp_floating { char mpf_signature[4]; /* "_MP_" */ unsigned long mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ unsigned char mpf_specification;/* Specification version */ unsigned char mpf_checksum; /* Checksum (makes sum 0) */ unsigned char mpf_feature1; /* Standard or configuration ? 
*/ unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ unsigned char mpf_feature3; /* Unused (0) */ unsigned char mpf_feature4; /* Unused (0) */ unsigned char mpf_feature5; /* Unused (0) */ }; struct mp_config_table { char mpc_signature[4]; #define MPC_SIGNATURE "PCMP" unsigned short mpc_length; /* Size of table */ char mpc_spec; /* 0x01 */ char mpc_checksum; char mpc_oem[8]; char mpc_productid[12]; unsigned long mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; unsigned long mpc_lapic; /* APIC address */ unsigned long reserved; }; /* Followed by entries */ #define MP_PROCESSOR 0 #define MP_BUS 1 #define MP_IOAPIC 2 #define MP_INTSRC 3 #define MP_LINTSRC 4 #define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ struct mpc_config_processor { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ unsigned char mpc_cpuflag; #define CPU_ENABLED 1 /* Processor is available */ #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ unsigned long mpc_cpufeature; #define CPU_STEPPING_MASK 0x0F #define CPU_MODEL_MASK 0xF0 #define CPU_FAMILY_MASK 0xF00 unsigned long mpc_featureflag; /* CPUID feature value */ unsigned long mpc_reserved[2]; }; struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. */ #define BUSTYPE_EISA "EISA" #define BUSTYPE_ISA "ISA" #define BUSTYPE_INTERN "INTERN" /* Internal BUS */ #define BUSTYPE_MCA "MCA" #define BUSTYPE_VL "VL" /* Local bus */ #define BUSTYPE_PCI "PCI" #define BUSTYPE_PCMCIA "PCMCIA" #define BUSTYPE_CBUS "CBUS" #define BUSTYPE_CBUSII "CBUSII" #define BUSTYPE_FUTURE "FUTURE" #define BUSTYPE_MBI "MBI" #define BUSTYPE_MBII "MBII" #define BUSTYPE_MPI "MPI" #define BUSTYPE_MPSA "MPSA" #define BUSTYPE_NUBUS "NUBUS" #define BUSTYPE_TC "TC" #define BUSTYPE_VME "VME" #define BUSTYPE_XPRESS "XPRESS" #define BUSTYPE_NEC98 "NEC98" struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; unsigned char mpc_flags; #define MPC_APIC_USABLE 0x01 unsigned long mpc_apicaddr; }; struct mpc_config_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbus; unsigned char mpc_srcbusirq; unsigned char mpc_dstapic; unsigned char mpc_dstirq; }; enum mp_irq_source_types { mp_INT = 0, mp_NMI = 1, mp_SMI = 2, mp_ExtINT = 3 }; #define MP_IRQDIR_DEFAULT 0 #define MP_IRQDIR_HIGH 1 #define MP_IRQDIR_LOW 3 struct mpc_config_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbusid; unsigned char mpc_srcbusirq; unsigned char mpc_destapic; #define MP_APIC_ALL 0xFF unsigned char mpc_destapiclint; }; struct mp_config_oemtable { char oem_signature[4]; #define MPC_OEM_SIGNATURE "_OEM" unsigned short oem_length; /* Size of table */ char oem_rev; /* 0x01 */ char oem_checksum; char mpc_oem[8]; }; struct mpc_config_translation { unsigned char mpc_type; unsigned char trans_len; unsigned char trans_type; unsigned char trans_quad; unsigned char trans_global; unsigned char trans_local; unsigned short trans_reserved; }; /* * Default configurations * * 1 2 CPU ISA 82489DX * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining * 3 2 CPU EISA 82489DX * 4 2 CPU MCA 82489DX * 5 2 CPU ISA+PCI * 6 2 CPU EISA+PCI * 7 2 CPU MCA+PCI */ enum mp_bustype { MP_BUS_ISA = 1, MP_BUS_EISA, MP_BUS_PCI, 
MP_BUS_MCA, MP_BUS_NEC98 }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/msgbuf.h000066400000000000000000000017171314037446600243220ustar00rootroot00000000000000#ifndef _I386_MSGBUF_H #define _I386_MSGBUF_H /* * The msqid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct msqid64_ds { struct ipc64_perm msg_perm; __kernel_time_t msg_stime; /* last msgsnd time */ unsigned long __unused1; __kernel_time_t msg_rtime; /* last msgrcv time */ unsigned long __unused2; __kernel_time_t msg_ctime; /* last change time */ unsigned long __unused3; unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ __kernel_pid_t msg_lspid; /* pid of last msgsnd */ __kernel_pid_t msg_lrpid; /* last receive pid */ unsigned long __unused4; unsigned long __unused5; }; #endif /* _I386_MSGBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/msi.h000066400000000000000000000004161314037446600236220ustar00rootroot00000000000000/* * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ #ifndef ASM_MSI_H #define ASM_MSI_H #include #include #define LAST_DEVICE_VECTOR 232 #define MSI_TARGET_CPU_SHIFT 12 #endif /* ASM_MSI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/msr.h000066400000000000000000000205561314037446600236420ustar00rootroot00000000000000#ifndef __ASM_MSR_H #define __ASM_MSR_H /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ #define rdmsr(msr,val1,val2) \ __asm__ __volatile__("rdmsr" \ : "=a" (val1), "=d" (val2) \ : "c" (msr)) #define wrmsr(msr,val1,val2) \ __asm__ __volatile__("wrmsr" \ : /* no outputs */ \ : "c" (msr), "a" (val1), "d" (val2)) #define rdmsrl(msr,val) do { \ unsigned long l__,h__; \ rdmsr (msr, l__, h__); \ val = l__; \ val |= ((u64)h__<<32); \ } while(0) static inline void wrmsrl (unsigned long msr, unsigned long long val) { unsigned long lo, hi; lo = (unsigned long) val; hi = val >> 32; wrmsr (msr, lo, hi); } /* wrmsr with exception handling */ #define wrmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: wrmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 4\n\t" \ " .long 2b,3b\n\t" \ ".previous" \ : "=a" (ret__) \ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\ ret__; }) /* rdmsr with exception handling */ #define rdmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: rdmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 4\n\t" \ " .long 2b,3b\n\t" \ ".previous" \ : "=r" (ret__), "=a" (*(a)), "=d" (*(b)) \ : "c" (msr), "i" (-EFAULT));\ ret__; }) #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) #define rdtscl(low) \ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx") #define rdtscll(val) \ __asm__ __volatile__("rdtsc" : "=A" (val)) #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ : "c" 
(counter)) /* symbolic names for some interesting MSRs */ /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0 #define MSR_IA32_P5_MC_TYPE 1 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_EBL_CR_POWERON 0x2a #define MSR_IA32_APICBASE 0x1b #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) #define MSR_IA32_UCODE_WRITE 0x79 #define MSR_IA32_UCODE_REV 0x8b #define MSR_P6_PERFCTR0 0xc1 #define MSR_P6_PERFCTR1 0xc2 #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_IA32_MCG_CAP 0x179 #define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MCG_CTL 0x17b /* P4/Xeon+ specific */ #define MSR_IA32_MCG_EAX 0x180 #define MSR_IA32_MCG_EBX 0x181 #define MSR_IA32_MCG_ECX 0x182 #define MSR_IA32_MCG_EDX 0x183 #define MSR_IA32_MCG_ESI 0x184 #define MSR_IA32_MCG_EDI 0x185 #define MSR_IA32_MCG_EBP 0x186 #define MSR_IA32_MCG_ESP 0x187 #define MSR_IA32_MCG_EFLAGS 0x188 #define MSR_IA32_MCG_EIP 0x189 #define MSR_IA32_MCG_RESERVED 0x18A #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_PERF_CTL 0x199 #define MSR_IA32_MPERF 0xE7 #define MSR_IA32_APERF 0xE8 #define MSR_IA32_THERM_CONTROL 0x19a #define MSR_IA32_THERM_INTERRUPT 0x19b #define MSR_IA32_THERM_STATUS 0x19c #define MSR_IA32_MISC_ENABLE 0x1a0 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 /* Pentium IV performance counter MSRs */ #define MSR_P4_BPU_PERFCTR0 0x300 #define MSR_P4_BPU_PERFCTR1 0x301 #define MSR_P4_BPU_PERFCTR2 0x302 #define MSR_P4_BPU_PERFCTR3 0x303 #define MSR_P4_MS_PERFCTR0 0x304 #define MSR_P4_MS_PERFCTR1 0x305 #define MSR_P4_MS_PERFCTR2 0x306 #define MSR_P4_MS_PERFCTR3 0x307 #define MSR_P4_FLAME_PERFCTR0 0x308 #define MSR_P4_FLAME_PERFCTR1 0x309 #define MSR_P4_FLAME_PERFCTR2 0x30a #define MSR_P4_FLAME_PERFCTR3 0x30b #define MSR_P4_IQ_PERFCTR0 0x30c #define MSR_P4_IQ_PERFCTR1 0x30d #define MSR_P4_IQ_PERFCTR2 0x30e #define MSR_P4_IQ_PERFCTR3 0x30f #define MSR_P4_IQ_PERFCTR4 0x310 #define MSR_P4_IQ_PERFCTR5 0x311 #define MSR_P4_BPU_CCCR0 0x360 #define MSR_P4_BPU_CCCR1 0x361 #define MSR_P4_BPU_CCCR2 0x362 #define MSR_P4_BPU_CCCR3 0x363 #define MSR_P4_MS_CCCR0 0x364 #define MSR_P4_MS_CCCR1 0x365 #define MSR_P4_MS_CCCR2 0x366 #define MSR_P4_MS_CCCR3 0x367 #define MSR_P4_FLAME_CCCR0 0x368 #define MSR_P4_FLAME_CCCR1 0x369 #define MSR_P4_FLAME_CCCR2 0x36a #define MSR_P4_FLAME_CCCR3 0x36b #define MSR_P4_IQ_CCCR0 0x36c #define MSR_P4_IQ_CCCR1 0x36d #define MSR_P4_IQ_CCCR2 0x36e #define MSR_P4_IQ_CCCR3 0x36f #define MSR_P4_IQ_CCCR4 0x370 #define MSR_P4_IQ_CCCR5 0x371 #define MSR_P4_ALF_ESCR0 0x3ca #define MSR_P4_ALF_ESCR1 0x3cb #define MSR_P4_BPU_ESCR0 0x3b2 #define MSR_P4_BPU_ESCR1 0x3b3 #define MSR_P4_BSU_ESCR0 0x3a0 #define MSR_P4_BSU_ESCR1 0x3a1 #define MSR_P4_CRU_ESCR0 0x3b8 #define MSR_P4_CRU_ESCR1 0x3b9 #define MSR_P4_CRU_ESCR2 0x3cc #define MSR_P4_CRU_ESCR3 0x3cd #define MSR_P4_CRU_ESCR4 0x3e0 #define MSR_P4_CRU_ESCR5 0x3e1 #define MSR_P4_DAC_ESCR0 0x3a8 #define MSR_P4_DAC_ESCR1 0x3a9 #define MSR_P4_FIRM_ESCR0 0x3a4 #define MSR_P4_FIRM_ESCR1 0x3a5 #define MSR_P4_FLAME_ESCR0 0x3a6 #define MSR_P4_FLAME_ESCR1 0x3a7 #define MSR_P4_FSB_ESCR0 0x3a2 
#define MSR_P4_FSB_ESCR1 0x3a3 #define MSR_P4_IQ_ESCR0 0x3ba #define MSR_P4_IQ_ESCR1 0x3bb #define MSR_P4_IS_ESCR0 0x3b4 #define MSR_P4_IS_ESCR1 0x3b5 #define MSR_P4_ITLB_ESCR0 0x3b6 #define MSR_P4_ITLB_ESCR1 0x3b7 #define MSR_P4_IX_ESCR0 0x3c8 #define MSR_P4_IX_ESCR1 0x3c9 #define MSR_P4_MOB_ESCR0 0x3aa #define MSR_P4_MOB_ESCR1 0x3ab #define MSR_P4_MS_ESCR0 0x3c0 #define MSR_P4_MS_ESCR1 0x3c1 #define MSR_P4_PMH_ESCR0 0x3ac #define MSR_P4_PMH_ESCR1 0x3ad #define MSR_P4_RAT_ESCR0 0x3bc #define MSR_P4_RAT_ESCR1 0x3bd #define MSR_P4_SAAT_ESCR0 0x3ae #define MSR_P4_SAAT_ESCR1 0x3af #define MSR_P4_SSU_ESCR0 0x3be #define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ #define MSR_P4_TBPU_ESCR0 0x3c2 #define MSR_P4_TBPU_ESCR1 0x3c3 #define MSR_P4_TC_ESCR0 0x3c4 #define MSR_P4_TC_ESCR1 0x3c5 #define MSR_P4_U2L_ESCR0 0x3b0 #define MSR_P4_U2L_ESCR1 0x3b1 /* AMD Defined MSRs */ #define MSR_K6_EFER 0xC0000080 #define MSR_K6_STAR 0xC0000081 #define MSR_K6_WHCR 0xC0000082 #define MSR_K6_UWCCR 0xC0000085 #define MSR_K6_EPMR 0xC0000086 #define MSR_K6_PSOR 0xC0000087 #define MSR_K6_PFIR 0xC0000088 #define MSR_K7_EVNTSEL0 0xC0010000 #define MSR_K7_EVNTSEL1 0xC0010001 #define MSR_K7_EVNTSEL2 0xC0010002 #define MSR_K7_EVNTSEL3 0xC0010003 #define MSR_K7_PERFCTR0 0xC0010004 #define MSR_K7_PERFCTR1 0xC0010005 #define MSR_K7_PERFCTR2 0xC0010006 #define MSR_K7_PERFCTR3 0xC0010007 #define MSR_K7_HWCR 0xC0010015 #define MSR_K7_CLK_CTL 0xC001001b #define MSR_K7_FID_VID_CTL 0xC0010041 #define MSR_K7_FID_VID_STATUS 0xC0010042 /* extended feature register */ #define MSR_EFER 0xc0000080 /* EFER bits: */ /* Execute Disable enable */ #define _EFER_NX 11 #define EFER_NX (1<<_EFER_NX) /* Centaur-Hauls/IDT defined MSRs. */ #define MSR_IDT_FCR1 0x107 #define MSR_IDT_FCR2 0x108 #define MSR_IDT_FCR3 0x109 #define MSR_IDT_FCR4 0x10a #define MSR_IDT_MCR0 0x110 #define MSR_IDT_MCR1 0x111 #define MSR_IDT_MCR2 0x112 #define MSR_IDT_MCR3 0x113 #define MSR_IDT_MCR4 0x114 #define MSR_IDT_MCR5 0x115 #define MSR_IDT_MCR6 0x116 #define MSR_IDT_MCR7 0x117 #define MSR_IDT_MCR_CTRL 0x120 /* VIA Cyrix defined MSRs*/ #define MSR_VIA_FCR 0x1107 #define MSR_VIA_LONGHAUL 0x110a #define MSR_VIA_RNG 0x110b #define MSR_VIA_BCR2 0x1147 /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a #endif /* __ASM_MSR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mtrr.h000066400000000000000000000073341314037446600240240ustar00rootroot00000000000000/* Generic MTRR (Memory Type Range Register) ioctls. Copyright (C) 1997-1999 Richard Gooch This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Richard Gooch may be reached by email at rgooch@atnf.csiro.au The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. 
*/ #ifndef _LINUX_MTRR_H #define _LINUX_MTRR_H #include #include #define MTRR_IOCTL_BASE 'M' struct mtrr_sentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ }; struct mtrr_gentry { unsigned int regnum; /* Register number */ unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ }; /* These are the various ioctls */ #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) /* These are the region types */ #define MTRR_TYPE_UNCACHABLE 0 #define MTRR_TYPE_WRCOMB 1 /*#define MTRR_TYPE_ 2*/ /*#define MTRR_TYPE_ 3*/ #define MTRR_TYPE_WRTHROUGH 4 #define MTRR_TYPE_WRPROT 5 #define MTRR_TYPE_WRBACK 6 #define MTRR_NUM_TYPES 7 #ifdef __KERNEL__ /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR extern int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_del (int reg, unsigned long base, unsigned long size); extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); # else static __inline__ int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_del (int reg, unsigned long base, unsigned long size) { return -ENODEV; } static __inline__ int mtrr_del_page (int reg, unsigned long base, unsigned long size) { return -ENODEV; } static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} # endif #endif #endif /* _LINUX_MTRR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/mutex.h000066400000000000000000000100451314037446600241730ustar00rootroot00000000000000/* * Assembly implementation of the mutex fastpath, based on atomic * decrement/increment. * * started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H /** * __mutex_fastpath_lock - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t * @fn: function to call if the original value was not 1 * * Change the count from 1 to a value lower than 1, and call if it * wasn't 1 originally. This function MUST leave the value lower than 1 * even when the "1" assertion wasn't true. 
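 */
/*
 * Illustrative example (not from the upstream source): the counter protocol
 * the fastpath documented above relies on, written with plain atomic ops.
 * A count of 1 means unlocked, 0 means locked with no waiters, and a
 * negative value means locked with waiters, so one atomic_dec_return() both
 * takes the lock and detects contention.  Names are hypothetical; example
 * only.
 */
#if 0
static void example_lock_slowpath(atomic_t *count);	/* hypothetical */

static void example_mutex_lock(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		example_lock_slowpath(count);	/* contended: take the slow path */
}

static int example_mutex_trylock(atomic_t *count)
{
	/* succeeds only if the count was still exactly 1 (unlocked) */
	return atomic_cmpxchg(count, 1, 0) == 1;
}
#endif
/*
 * (original header text resumes below)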
*/ #define __mutex_fastpath_lock(count, fail_fn) \ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK " decl (%%eax) \n" \ " js 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ } while (0) /** * __mutex_fastpath_lock_retval - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t * @fail_fn: function to call if the original value was not 1 * * Change the count from 1 to a value lower than 1, and call if it * wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int fastcall (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); else return 0; } /** * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1 * @count: pointer of type atomic_t * @fail_fn: function to call if the original value was not 0 * * try to promote the mutex from 0 to 1. if it wasn't 0, call . * In the failure case, this function is allowed to either set the value * to 1, or to set it to a value lower than 1. * * If the implementation sets it to a value of lower than 1, the * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs * to return 0 otherwise. */ #define __mutex_fastpath_unlock(count, fail_fn) \ do { \ unsigned int dummy; \ \ typecheck(atomic_t *, count); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK " incl (%%eax) \n" \ " jle 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=a" (dummy) \ : "a" (count) \ : "memory", "ecx", "edx"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 /** * __mutex_fastpath_trylock - try to acquire the mutex, without waiting * * @count: pointer of type atomic_t * @fail_fn: fallback function * * Change the count from 1 to a value lower than 1, and return 0 (failure) * if it wasn't 1 originally, or return 1 (success) otherwise. This function * MUST leave the value lower than 1 even when the "1" assertion wasn't true. * Additionally, if the value was < 0 originally, this function must not leave * it to 0 on failure. */ static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { /* * We have two variants here. The cmpxchg based one is the best one * because it never induce a false contention state. It is included * here because architectures using the inc/dec algorithms over the * xchg ones are much more likely to support cmpxchg natively. * * If not we fall back to the spinlock based variant - that is * just as efficient (and simpler) as a 'destructive' probing of * the mutex state would be. */ #ifdef __HAVE_ARCH_CMPXCHG if (likely(atomic_cmpxchg(count, 1, 0) == 1)) return 1; return 0; #else return fail_fn(count); #endif } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/namei.h000066400000000000000000000005641314037446600241270ustar00rootroot00000000000000/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $ * linux/include/asm-i386/namei.h * * Included from linux/fs/namei.c */ #ifndef __I386_NAMEI_H #define __I386_NAMEI_H /* This dummy routine maybe changed to something useful * for /usr/gnemul/ emulation stuff. * Look at asm-sparc/namei.h for details. 
*/ #define __emul_prefix() NULL #endif /* __I386_NAMEI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/nmi.h000066400000000000000000000007501314037446600236160ustar00rootroot00000000000000/* * linux/include/asm-i386/nmi.h */ #ifndef ASM_NMI_H #define ASM_NMI_H #include struct pt_regs; typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); /** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); /** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); #endif /* ASM_NMI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/node.h000066400000000000000000000011161314037446600237550ustar00rootroot00000000000000#ifndef _ASM_I386_NODE_H_ #define _ASM_I386_NODE_H_ #include #include #include #include #include struct i386_node { struct node node; }; extern struct i386_node node_devices[MAX_NUMNODES]; static inline int arch_register_node(int num){ int p_node; struct node *parent = NULL; if (!node_online(num)) return 0; p_node = parent_node(num); if (p_node != num) parent = &node_devices[p_node].node; return register_node(&node_devices[num].node, num, parent); } #endif /* _ASM_I386_NODE_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/numa.h000066400000000000000000000000331314037446600237650ustar00rootroot00000000000000 int pxm_to_nid(int pxm); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/numaq.h000066400000000000000000000135521314037446600241600ustar00rootroot00000000000000/* * Written by: Patricia Gaughen, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to */ #ifndef NUMAQ_H #define NUMAQ_H #ifdef CONFIG_X86_NUMAQ extern int get_memcfg_numaq(void); /* * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the */ #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ /* * Communication area for each processor on lynxer-processor tests. * * NOTE: If you change the size of this eachproc structure you need * to change the definition for EACH_QUAD_SIZE. */ struct eachquadmem { unsigned int priv_mem_start; /* Starting address of this */ /* quad's private memory. */ /* This is always 0. */ /* In MB. */ unsigned int priv_mem_size; /* Size of this quad's */ /* private memory. */ /* In MB. */ unsigned int low_shrd_mem_strp_start;/* Starting address of this */ /* quad's low shared block */ /* (untranslated). */ /* In MB. */ unsigned int low_shrd_mem_start; /* Starting address of this */ /* quad's low shared memory */ /* (untranslated). */ /* In MB. */ unsigned int low_shrd_mem_size; /* Size of this quad's low */ /* shared memory. */ /* In MB. 
*/ unsigned int lmmio_copb_start; /* Starting address of this */ /* quad's local memory */ /* mapped I/O in the */ /* compatibility OPB. */ /* In MB. */ unsigned int lmmio_copb_size; /* Size of this quad's local */ /* memory mapped I/O in the */ /* compatibility OPB. */ /* In MB. */ unsigned int lmmio_nopb_start; /* Starting address of this */ /* quad's local memory */ /* mapped I/O in the */ /* non-compatibility OPB. */ /* In MB. */ unsigned int lmmio_nopb_size; /* Size of this quad's local */ /* memory mapped I/O in the */ /* non-compatibility OPB. */ /* In MB. */ unsigned int io_apic_0_start; /* Starting address of I/O */ /* APIC 0. */ unsigned int io_apic_0_sz; /* Size I/O APIC 0. */ unsigned int io_apic_1_start; /* Starting address of I/O */ /* APIC 1. */ unsigned int io_apic_1_sz; /* Size I/O APIC 1. */ unsigned int hi_shrd_mem_start; /* Starting address of this */ /* quad's high shared memory.*/ /* In MB. */ unsigned int hi_shrd_mem_size; /* Size of this quad's high */ /* shared memory. */ /* In MB. */ unsigned int mps_table_addr; /* Address of this quad's */ /* MPS tables from BIOS, */ /* in system space.*/ unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */ /* local access of MDC. */ unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */ /* remote access of MDC. */ unsigned int mm_port_io_start; /* Starting address of this */ /* quad's memory mapped Port */ /* I/O space. */ unsigned int mm_port_io_size; /* Size of this quad's memory*/ /* mapped Port I/O space. */ unsigned int mm_rmt_io_apic_start; /* Starting address of this */ /* quad's memory mapped */ /* remote I/O APIC space. */ unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/ /* mapped remote I/O APIC */ /* space. */ unsigned int mm_isa_start; /* Starting address of this */ /* quad's memory mapped ISA */ /* space (contains MDC */ /* memory space). */ unsigned int mm_isa_size; /* Size of this quad's memory*/ /* mapped ISA space (contains*/ /* MDC memory space). */ unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/ unsigned int lcl_qmi_addr; /* Local addr to access QMI. */ }; /* * Note: This structure must be NOT be changed unless the multiproc and * OS are changed to reflect the new structure. */ struct sys_cfg_data { unsigned int quad_id; unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */ unsigned int scd_version; /* Version number of this table. */ unsigned int first_quad_id; unsigned int quads_present31_0; /* 1 bit for each quad */ unsigned int quads_present63_32; /* 1 bit for each quad */ unsigned int config_flags; unsigned int boot_flags; unsigned int csr_start_addr; /* Absolute value (not in MB) */ unsigned int csr_size; /* Absolute value (not in MB) */ unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */ unsigned int lcl_apic_size; /* Absolute value (not in MB) */ unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ /* may not be totally populated */ unsigned int split_mem_enbl; /* 0 for no low shared memory */ unsigned int mmio_sz; /* Size of total system memory mapped I/O */ /* (in MB). */ unsigned int quad_spin_lock; /* Spare location used for quad */ /* bringup. */ unsigned int nonzero55; /* For checksumming. */ unsigned int nonzeroaa; /* For checksumming. 
*/ unsigned int scd_magic_number; unsigned int system_type; unsigned int checksum; /* * memory configuration area for each quad */ struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ }; static inline unsigned long *get_zholes_size(int nid) { return NULL; } #endif /* CONFIG_X86_NUMAQ */ #endif /* NUMAQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/numnodes.h000066400000000000000000000004411314037446600246600ustar00rootroot00000000000000#ifndef _ASM_MAX_NUMNODES_H #define _ASM_MAX_NUMNODES_H #include #ifdef CONFIG_X86_NUMAQ /* Max 16 Nodes */ #define NODES_SHIFT 4 #elif defined(CONFIG_ACPI_SRAT) /* Max 8 Nodes */ #define NODES_SHIFT 3 #endif /* CONFIG_X86_NUMAQ */ #endif /* _ASM_MAX_NUMNODES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/page.h000066400000000000000000000104531314037446600237500ustar00rootroot00000000000000#ifndef _I386_PAGE_H #define _I386_PAGE_H /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #ifdef __KERNEL__ #ifndef __ASSEMBLY__ #include #ifdef CONFIG_X86_USE_3DNOW #include #define clear_page(page) mmx_clear_page((void *)(page)) #define copy_page(to,from) mmx_copy_page(to,from) #else /* * On older X86 processors it's not a win to use MMX here it seems. * Maybe the K6-III ? */ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) #endif #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * These are used to make use of C type-checking.. */ extern int nx_enabled; #ifdef CONFIG_X86_PAE extern unsigned long long __supported_pte_mask; typedef struct { unsigned long pte_low, pte_high; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgprot; } pgprot_t; #define pmd_val(x) ((x).pmd) #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) #define __pte(x) ((pte_t) { (x), (x) >> 32 } ) #define __pmd(x) ((pmd_t) { (x) } ) #define HPAGE_SHIFT 21 #else typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define boot_pte_t pte_t /* or would you rather have a typedef */ #define pte_val(x) ((x).pte_low) #define __pte(x) ((pte_t) { (x) } ) #define HPAGE_SHIFT 22 #endif #define PTE_MASK PAGE_MASK #ifdef CONFIG_HUGETLB_PAGE #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* * This handles the memory map.. We could make this a config * option, but too many people screw it up, and too few need * it. 
* * A __PAGE_OFFSET of 0xC0000000 means that the kernel has * a virtual address space of one gigabyte, which limits the * amount of physical memory you can use to about 950MB. * * If you want more physical memory than this then see the CONFIG_HIGHMEM4G * and CONFIG_HIGHMEM64G options in the kernel configuration. */ #ifndef __ASSEMBLY__ /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. */ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern int page_is_ram(unsigned long pagenr); #endif /* __ASSEMBLY__ */ #ifdef __ASSEMBLY__ #define __PAGE_OFFSET CONFIG_PAGE_OFFSET #define __PHYSICAL_START CONFIG_PHYSICAL_START #else #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #endif #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (VM_READ | VM_WRITE | \ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #endif /* __KERNEL__ */ #include #endif /* _I386_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/param.h000066400000000000000000000007501314037446600241330ustar00rootroot00000000000000#ifndef _ASMi386_PARAM_H #define _ASMi386_PARAM_H #ifdef __KERNEL__ # include # define HZ CONFIG_HZ /* Internal kernel timer frequency */ # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ #endif #ifndef HZ #define HZ 100 #endif #define EXEC_PAGESIZE 4096 #ifndef NOGROUP #define NOGROUP (-1) #endif #define MAXHOSTNAMELEN 64 /* max length of hostname */ #define COMMAND_LINE_SIZE 2048 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/parport.h000066400000000000000000000007731314037446600245270ustar00rootroot00000000000000/* * parport.h: ia32-specific parport initialisation * * Copyright (C) 1999, 2000 Tim Waugh * * This file should only be included by drivers/parport/parport_pc.c. 
*/ #ifndef _ASM_I386_PARPORT_H #define _ASM_I386_PARPORT_H 1 static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) { return parport_pc_find_isa_ports (autoirq, autodma); } #endif /* !(_ASM_I386_PARPORT_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pci-direct.h000066400000000000000000000000431314037446600250510ustar00rootroot00000000000000#include "asm-x86_64/pci-direct.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pci.h000066400000000000000000000062451314037446600236130ustar00rootroot00000000000000#ifndef __i386_PCI_H #define __i386_PCI_H #include #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #define pcibios_scan_all_fns(a, b) 0 extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. */ #include #include #include #include #include struct pci_dev; /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. */ #define PCI_DMA_BUS_IS_PHYS (1) /* pci_unmap_{page,single} is a nop so... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) /* This is always fine. 
*/ #define pci_dac_dma_supported(pci_dev, mask) (1) static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return pfn_to_page(dma_addr >> PAGE_SHIFT); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #endif /* __KERNEL__ */ /* implement the pci_ DMA API in terms of the generic device dma_ one */ #include /* generic pci stuff */ #include #endif /* __i386_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/percpu.h000066400000000000000000000001761314037446600243330ustar00rootroot00000000000000#ifndef __ARCH_I386_PERCPU__ #define __ARCH_I386_PERCPU__ #include #endif /* __ARCH_I386_PERCPU__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgalloc.h000066400000000000000000000027561314037446600244640ustar00rootroot00000000000000#ifndef _I386_PGALLOC_H #define _I386_PGALLOC_H #include #include #include #include struct page; #define pmd_populate_kernel(mm, pmd, pte) \ do { \ mach_setup_pte(pte, __pa(pte) >> PAGE_SHIFT); \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ } while (0) #define pmd_populate(mm, pmd, pte) \ do { \ mach_setup_pte(page_address(pte), page_to_pfn(pte)); \ set_pmd(pmd, __pmd(_PAGE_TABLE + \ ((unsigned long long)page_to_pfn(pte) << \ (unsigned long long) PAGE_SHIFT))); \ } while (0) /* * Allocate and free page tables. */ extern pgd_t *pgd_alloc(struct mm_struct *); extern void pgd_free(pgd_t *pgd); extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); static inline void pte_free_kernel(pte_t *pte) { free_page((unsigned long)pte); } static inline void pte_free(struct page *pte) { __free_page(pte); } #define __pte_free_tlb(tlb,pte) \ do { \ tlb_remove_page((tlb),(pte)); \ mach_release_pte(page_address(pte), page_to_pfn(pte)); \ } while (0) #ifdef CONFIG_X86_PAE /* * In the PAE case we free the pmds as part of the pgd. 
*/ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) #define pmd_free(x) do { } while (0) #define __pmd_free_tlb(tlb,x) do { } while (0) #define pud_populate(mm, pmd, pte) BUG() #endif #define check_pgt_cache() do { } while (0) #endif /* _I386_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgtable-2level-defs.h000066400000000000000000000005221314037446600265540ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_DEFS_H #define _I386_PGTABLE_2LEVEL_DEFS_H /* * traditional i386 two-level paging structure: */ #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 /* * the i386 is two-level, so we don't really have any * PMD directory physically. */ #define PTRS_PER_PTE 1024 #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgtable-2level.h000066400000000000000000000045661314037446600256510ustar00rootroot00000000000000#ifndef _I386_PGTABLE_2LEVEL_H #define _I386_PGTABLE_2LEVEL_H #include #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) #define pte_same(a, b) ((a).pte_low == (b).pte_low) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_none(x) (!(x).pte_low) #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) /* * All present user pages are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte); } /* * All present pages are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return 1; } /* * Bits 0, 6 and 7 are taken, split up the 29 bits of offset * into this range: */ #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) \ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) #define pgoff_to_pte(off) \ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x1f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) /* Raw PTE clear */ #define _pte_clear(pteptr) do { (*(pteptr) = __pte(0)); } while (0) #define _ptep_get_and_clear(pteptr) ((pte_t) { xchg(&(pteptr)->pte_low, 0) }) /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following * hook is made available. 
*/ #ifndef __HAVE_SUBARCH_PTE_WRITE_FUNCTIONS #define set_pte(pteptr, pteval) (*(pteptr) = pteval) #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pte_at_defer(mm,addr,ptep,pteval) set_pte(ptep,pteval) #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #endif /* __HAVE_SUBARCH_PTE_WRITE_FUNCTIONS */ #define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval)) #endif /* _I386_PGTABLE_2LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgtable-3level-defs.h000066400000000000000000000006711314037446600265620ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_DEFS_H #define _I386_PGTABLE_3LEVEL_DEFS_H /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 30 #define PTRS_PER_PGD 4 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgtable-3level.h000066400000000000000000000115131314037446600256400ustar00rootroot00000000000000#ifndef _I386_PGTABLE_3LEVEL_H #define _I386_PGTABLE_3LEVEL_H #include /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. * * Copyright (C) 1999 Ingo Molnar */ #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) #define pud_none(pud) 0 #define pud_bad(pud) 0 #define pud_present(pud) 1 /* * Is the pte executable? */ static inline int pte_x(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } /* * All present user-pages with !NX bit are user-executable: */ static inline int pte_exec(pte_t pte) { return pte_user(pte) && pte_x(pte); } /* * All present pages with !NX bit are kernel-executable: */ static inline int pte_exec_kernel(pte_t pte) { return pte_x(pte); } #define pud_page(pud) \ ((struct page *) __va(pud_val(pud) & PAGE_MASK)) #define pud_page_kernel(pud) \ ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) /* Find an entry in the second-level page table.. 
*/ #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ pmd_index(address)) static inline int pte_same(pte_t a, pte_t b) { return a.pte_low == b.pte_low && a.pte_high == b.pte_high; } #define pte_page(x) pfn_to_page(pte_pfn(x)) static inline int pte_none(pte_t pte) { return !pte.pte_low && !pte.pte_high; } static inline unsigned long pte_pfn(pte_t pte) { return (pte.pte_low >> PAGE_SHIFT) | (pte.pte_high << (32 - PAGE_SHIFT)); } extern unsigned long long __supported_pte_mask; static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \ pgprot_val(pgprot)) & __supported_pte_mask); } /* * Bits 0, 6 and 7 are taken in the low part of the pte, * put the 32 bits of offset into the high part. */ #define pte_to_pgoff(pte) ((pte).pte_high) #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) #define PTE_FILE_MAX_BITS 32 /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) /* * For PTEs and PDEs, we must clear the P-bit first when clearing a page table * entry, so clear the bottom half first and enforce ordering with a compiler * barrier. */ static inline void _pte_clear(pte_t *ptep) { (ptep)->pte_low = 0; smp_wmb(); ptep->pte_high = 0; } static inline pte_t _ptep_get_and_clear(pte_t *ptep) { pte_t res; /* xchg acts as a barrier before the setting of the high bits */ res.pte_low = xchg(&ptep->pte_low, 0); res.pte_high = ptep->pte_high; ptep->pte_high = 0; return res; } /* * Sub-arch is allowed to override these, so check for definition first. * New functions which write to hardware page table entries should go here. */ #ifndef __HAVE_SUBARCH_PTE_WRITE_FUNCTIONS /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will * not attempt to update the pte. In places where this is * not possible, use pte_get_and_clear to obtain the old pte * value and then use set_pte to update it. -ben */ static inline void set_pte(pte_t *ptep, pte_t pte) { ptep->pte_high = pte.pte_high; smp_wmb(); ptep->pte_low = pte.pte_low; } #define set_pte_at(mm,addr,ptep,pte) set_pte(ptep,pte) #define set_pte_at_defer(mm,addr,ptep,pte) set_pte(ptep,pte) #define set_pte_atomic(pteptr,pteval) \ set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) #define set_pmd(pmdptr,pmdval) \ set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval)) #define set_pud(pudptr,pudval) \ (*(pudptr) = (pudval)) #define pte_clear(mm,addr,xp) do { _pte_clear(xp); } while (0) static inline void pmd_clear(pmd_t *pmd) { u32 *tmp = (u32 *)pmd; *tmp = 0; smp_wmb(); *(tmp + 1) = 0; } #endif /* __HAVE_SUBARCH_PTE_WRITE_FUNCTIONS */ /* * Pentium-II erratum A13: in PAE mode we explicitly have to flush * the TLB via cr3 if the top-level pgd is changed... * We do not let the generic code free and clear pgd entries due to * this erratum. 
*/ static inline void pud_clear (pud_t * pud) { } #define __HAVE_ARCH_SET_PTE_ATOMIC #endif /* _I386_PGTABLE_3LEVEL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/pgtable.h000066400000000000000000000376021314037446600244570ustar00rootroot00000000000000#ifndef _I386_PGTABLE_H #define _I386_PGTABLE_H #include /* * The Linux memory management assumes a three-level page table setup. On * the i386, we use that, but "fold" the mid level into the top-level page * table, so that we physically have the same two-level page table as the * i386 mmu expects. * * This file contains the functions and defines necessary to modify and use * the i386 page table tree. */ #ifndef __ASSEMBLY__ #include #include #include #ifndef _I386_BITOPS_H #include #endif #include #include #include struct mm_struct; struct vm_area_struct; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) extern unsigned long empty_zero_page[1024]; extern pgd_t swapper_pg_dir[1024]; extern kmem_cache_t *pgd_cache; extern kmem_cache_t *pmd_cache; extern spinlock_t pgd_lock; extern struct page *pgd_list; void pmd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_ctor(void *, kmem_cache_t *, unsigned long); void pgd_dtor(void *, kmem_cache_t *, unsigned long); void pgtable_cache_init(void); void paging_init(void); /* * The Linux x86 paging architecture is 'compile-time dual-mode', it * implements both the traditional 2-level x86 page tables and the * newer 3-level PAE-mode page tables. */ #ifdef CONFIG_X86_PAE # include # define PMD_SIZE (1UL << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE-1)) #else # include #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) #define FIRST_USER_ADDRESS 0 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) #define TWOLEVEL_PGDIR_SHIFT 22 #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) /* Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the * physical memory until the kernel virtual memory starts. That means that * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) #else # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif /* * _PAGE_PSE set in the page directory entry just means that * the page directory entry points directly to a 4MB-aligned block of * memory. */ #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 #define _PAGE_BIT_UNUSED3 11 #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_UNUSED1 0x200 /* available for programmer */ #define _PAGE_UNUSED2 0x400 #define _PAGE_UNUSED3 0x800 /* If _PAGE_PRESENT is clear, we use these: */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; pte_present gives true */ #ifdef CONFIG_X86_PAE #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) #else #define _PAGE_NX 0 #endif #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE \ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_SHARED_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY \ PAGE_COPY_NOEXEC #define PAGE_READONLY \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC \ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define _PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define _PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC, __PAGE_KERNEL_PAGE_TABLE; #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_PAGE_TABLE __pgprot(__PAGE_KERNEL_PAGE_TABLE) #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) /* * The i386 can't do page protection for execute, and considers that * the same are read. Also, write permissions imply read permissions. * This is the closest we can get.. 
*/ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are * done without a 'access_ok(VERIFY_WRITE,..)' */ #undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ #define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT) static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; } /* * The following only works if pte_present() is not true. 
*/ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; } #include #ifdef CONFIG_X86_PAE # include #else # include #endif #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define ptep_test_and_clear_dirty(vma, addr, ptep) \ ({ \ int ret; \ if (pte_dirty(*(ptep))) \ ret = 0; \ else { \ ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);\ if (ret) \ pte_update_hook((vma)->vm_mm, (addr), (ptep)); \ } \ ret; \ }) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define ptep_test_and_clear_young(vma, addr, ptep) \ ({ \ int ret; \ if (!pte_young(*(ptep))) \ ret = 0; \ else { \ ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);\ if (ret) \ pte_update_hook((vma)->vm_mm, (addr), (ptep)); \ } \ ret; \ }) #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = _ptep_get_and_clear(ptep); pte_update_hook(mm, addr, ptep); return pte; } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) { pte_t pte; if (full) { pte = *ptep; _pte_clear(ptep); } else { pte = ptep_get_and_clear(mm, addr, ptep); } return pte; } #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { if (!pte_write(*ptep)) return; clear_bit(_PAGE_BIT_RW, &ptep->pte_low); pte_update_hook(mm, addr, ptep); } /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * * dst - pointer to pgd range anwhere on a pgd page * src - "" * count - the number of pgds to copy. * * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { memcpy(dst, src, count * sizeof(pgd_t)); } /* * Macro to mark a page protection value as "uncacheable". On processors which do not support * it, this is a no-op. */ #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte.pte_low &= _PAGE_CHG_MASK; pte.pte_low |= pgprot_val(newprot); #ifdef CONFIG_X86_PAE /* * Chop off the NX bit (if present), and add the NX portion of * the newprot (if present): */ pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); pte.pte_high |= (pgprot_val(newprot) >> 32) & \ (__supported_pte_mask >> 32); #endif return pte; } #define pmd_large(pmd) \ ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) /* * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] * * this macro returns the index of the entry in the pgd page which would * control the given virtual address */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index_k(addr) pgd_index(addr) /* * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] * * this macro returns the index of the entry in the pmd page which would * control the given virtual address */ #define pmd_index(address) \ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) /* * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] * * this macro returns the index of the entry in the pte page which would * control the given virtual address */ #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) \ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page_kernel(pmd) \ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. */ extern pte_t *lookup_address(unsigned long address); /* * Make a given kernel text page executable/non-executable. * Returns the previous executability setting of that page (which * is used to restore the previous state). Used by the SMP bootup code. * NOTE: this is an __init function for security reasons. 
*/ #ifdef CONFIG_X86_PAE extern int set_kernel_exec(unsigned long vaddr, int enable); #else static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} #endif extern void noexec_setup(const char *str); #include #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ({ \ pte_t *__ptep; \ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0); \ mach_map_linear_pt(0, __ptep, pfn); \ __ptep = __ptep + pte_index(address); \ __ptep; \ }) #define pte_offset_map_nested(dir, address) \ ({ \ pte_t *__ptep; \ unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \ __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1); \ mach_map_linear_pt(1, __ptep, pfn); \ __ptep = __ptep + pte_index(address); \ __ptep; \ }) #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) #else #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) #endif /* * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. */ #define update_mmu_cache(vma,address,pte) do { } while (0) #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_FLATMEM #define kern_addr_valid(addr) (1) #endif /* CONFIG_FLATMEM */ #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define __HAVE_ARCH_PTE_SAME #include #endif /* _I386_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/poll.h000066400000000000000000000007761314037446600240110ustar00rootroot00000000000000#ifndef __i386_POLL_H #define __i386_POLL_H /* These are specified by iBCS2 */ #define POLLIN 0x0001 #define POLLPRI 0x0002 #define POLLOUT 0x0004 #define POLLERR 0x0008 #define POLLHUP 0x0010 #define POLLNVAL 0x0020 /* The rest seem to be more-or-less nonstandard. Check them! */ #define POLLRDNORM 0x0040 #define POLLRDBAND 0x0080 #define POLLWRNORM 0x0100 #define POLLWRBAND 0x0200 #define POLLMSG 0x0400 #define POLLREMOVE 0x1000 struct pollfd { int fd; short events; short revents; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/posix_types.h000066400000000000000000000045731314037446600254300ustar00rootroot00000000000000#ifndef __ARCH_I386_POSIX_TYPES_H #define __ARCH_I386_POSIX_TYPES_H /* * This file is generally used by user-level software, so you need to * be a little careful about namespace pollution etc. Also, we cannot * assume GCC is being used. 
*/ typedef unsigned long __kernel_ino_t; typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_nlink_t; typedef long __kernel_off_t; typedef int __kernel_pid_t; typedef unsigned short __kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; typedef unsigned int __kernel_size_t; typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; typedef long __kernel_time_t; typedef long __kernel_suseconds_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef int __kernel_daddr_t; typedef char * __kernel_caddr_t; typedef unsigned short __kernel_uid16_t; typedef unsigned short __kernel_gid16_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef unsigned short __kernel_old_uid_t; typedef unsigned short __kernel_old_gid_t; typedef unsigned short __kernel_old_dev_t; #ifdef __GNUC__ typedef long long __kernel_loff_t; #endif typedef struct { #if defined(__KERNEL__) || defined(__USE_ALL) int val[2]; #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ int __val[2]; #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ } __kernel_fsid_t; #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) #undef __FD_SET #define __FD_SET(fd,fdsetp) \ __asm__ __volatile__("btsl %1,%0": \ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) #undef __FD_CLR #define __FD_CLR(fd,fdsetp) \ __asm__ __volatile__("btrl %1,%0": \ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) #undef __FD_ISSET #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ unsigned char __result; \ __asm__ __volatile__("btl %1,%2 ; setb %0" \ :"=q" (__result) :"r" ((int) (fd)), \ "m" (*(__kernel_fd_set *) (fdsetp))); \ __result; })) #undef __FD_ZERO #define __FD_ZERO(fdsetp) \ do { \ int __d0, __d1; \ __asm__ __volatile__("cld ; rep ; stosl" \ :"=m" (*(__kernel_fd_set *) (fdsetp)), \ "=&c" (__d0), "=&D" (__d1) \ :"a" (0), "1" (__FDSET_LONGS), \ "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ } while (0) #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/processor.h000066400000000000000000000436101314037446600250540ustar00rootroot00000000000000/* * include/asm-i386/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_I386_PROCESSOR_H #define __ASM_I386_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* flag for disabling the tsc */ extern int tsc_disable; struct desc_struct { unsigned long a,b; }; #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. * Members of this structure are referenced in head.S, so think twice * before touching them. 
[mj] */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; char wp_works_ok; /* It doesn't on 386's */ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ char hard_math; char rfu; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ unsigned long x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB - valid for CPUS which support this call */ int x86_cache_alignment; /* In bytes */ char fdiv_bug; char f00f_bug; char coma_bug; char pad0; int x86_power; unsigned long loops_per_jiffy; #ifdef CONFIG_SMP cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif unsigned char x86_max_cores; /* cpuid returned max cores value */ unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char apicid; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 #define X86_VENDOR_NUM 9 #define X86_VENDOR_UNKNOWN 0xff /* * capabilities of CPUs */ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; extern struct tss_struct doublefault_tss; DECLARE_PER_CPU(struct tss_struct, init_tss); #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern int phys_proc_id[NR_CPUS]; extern int cpu_core_id[NR_CPUS]; extern int cpu_llc_id[NR_CPUS]; extern char ignore_fpu_irq; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); #ifdef CONFIG_X86_HT extern void detect_ht(struct cpuinfo_x86 *c); #else static inline void detect_ht(struct cpuinfo_x86 *c) {} #endif /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ 
#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ #include /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. */ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features |= mask; cr4 = read_cr4(); cr4 |= mask; write_cr4(cr4); } static inline void clear_in_cr4 (unsigned long mask) { unsigned cr4; mmu_cr4_features &= ~mask; cr4 = read_cr4(); cr4 &= ~mask; write_cr4(cr4); } /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_PCR0 0x20 #define CX86_GCR 0xb8 #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_PCR1 0xf0 #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } /* from system description table in BIOS. Mostly for MCA use, but others may find it useful. */ extern unsigned int machine_id; extern unsigned int machine_submodel_id; extern unsigned int BIOS_revision; extern unsigned int mca_pentium_flag; /* Boot loader type from the setup header */ extern int bootloader_type; /* * User space process size: 3GB (default). */ #define TASK_SIZE (PAGE_OFFSET) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define TASK_UNMAPPED_BASE (current->map_base) #define __TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3) #define HAVE_ARCH_PICK_MMAP_LAYOUT /* * Size of io_bitmap. 
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 struct i387_fsave_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ long status; /* software status information */ }; struct i387_fxsave_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long mxcsr_mask; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; } __attribute__ ((aligned (16))); struct i387_soft_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ unsigned char ftop, changed, lookahead, no_update, rm, alimit; struct info *info; unsigned long entry_eip; }; union i387_union { struct i387_fsave_struct fsave; struct i387_fxsave_struct fxsave; struct i387_soft_struct soft; }; typedef struct { unsigned long seg; } mm_segment_t; struct thread_struct; struct tss_struct { unsigned short back_link,__blh; unsigned long esp0; unsigned short ss0,__ss0h; unsigned long esp1; unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ unsigned long esp2; unsigned short ss2,__ss2h; unsigned long __cr3; unsigned long eip; unsigned long eflags; unsigned long eax,ecx,edx,ebx; unsigned long esp; unsigned long ebp; unsigned long esi; unsigned long edi; unsigned short es, __esh; unsigned short cs, __csh; unsigned short ss, __ssh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; unsigned short ldt, __ldth; unsigned short trace, io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* * Cache the current maximum and the last task that used the bitmap: */ unsigned long io_bitmap_max; struct thread_struct *io_bitmap_owner; /* * pads the TSS to be cacheline-aligned (size is 0x100) */ unsigned long __cacheline_filler[35]; /* * .. and then another 0x100 bytes for emergency kernel stack */ unsigned long stack[64]; } __attribute__((packed)); #define ARCH_MIN_TASKALIGN 16 struct thread_struct { /* cached TLS descriptors. */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned long esp0; unsigned long sysenter_cs; unsigned long eip; unsigned long esp; unsigned long fs; unsigned long gs; /* Hardware debugging registers */ unsigned long debugreg[8]; /* %%db0-7 debug registers */ /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387; /* virtual 86 mode info */ struct vm86_struct __user * vm86_info; unsigned long screen_bitmap; unsigned long v86flags, v86mask, saved_esp0; unsigned int saved_fs, saved_gs; /* IO permissions */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* max allowed port in the bitmap, in bytes: */ unsigned long io_bitmap_max; }; #define INIT_THREAD { \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } /* * Note that the .io_bitmap member must be extra-big. 
This is because * the CPU will access an additional byte beyond the end of the IO * permission bitmap. The extra byte must be all 1 bits, and must * be within the limit. */ #define INIT_TSS { \ .esp0 = sizeof(init_stack) + (long)&init_stack, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ } static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) { tss->esp0 = thread->esp0; arch_update_kernel_stack(__KERNEL_DS, thread->esp0); /* This can only happen when SEP is enabled, no need to test "SEP"arately */ if (unlikely(tss->ss1 != thread->sysenter_cs)) { tss->ss1 = thread->sysenter_cs; wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); } } #define start_thread(regs, new_eip, new_esp) do { \ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ set_fs(USER_DS); \ regs->xds = __USER_DS; \ regs->xes = __USER_DS; \ regs->xss = __USER_DS; \ regs->xcs = __USER_CS; \ regs->eip = new_eip; \ regs->esp = new_esp; \ } while (0) /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern unsigned long thread_saved_pc(struct task_struct *tsk); void show_trace(struct task_struct *task, unsigned long *stack); unsigned long get_wchan(struct task_struct *p); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) #define KSTK_TOP(info) \ ({ \ unsigned long *__ptr = (unsigned long *)(info); \ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ }) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. * This is necessary to guarantee that the entire "struct pt_regs" * is accessable even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers * when switching to the same priv ring). * Therefore beware: accessing the xss/esp fields of the * "struct pt_regs" is possible, but they may contain the * completely wrong values. */ #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ __regs__ - 1; \ }) #define KSTK_EIP(task) (task_pt_regs(task)->eip) #define KSTK_ESP(task) (task_pt_regs(task)->esp) struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ #define MICROCODE_IOCFREE _IO('6',0) /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } #define cpu_relax() rep_nop() /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" #define GENERIC_NOP2 ".byte 0x89,0xf6\n" #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 /* Opteron nops */ #define K8_NOP1 GENERIC_NOP1 #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 /* K7 nops */ /* uses eax dependencies (arbitary choice) */ #define K7_NOP1 GENERIC_NOP1 #define K7_NOP2 ".byte 0x8b,0xc0\n" #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" #define K7_NOP5 K7_NOP4 ASM_NOP1 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" #define K7_NOP8 K7_NOP7 ASM_NOP1 #ifdef CONFIG_MK8 #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 #elif defined(CONFIG_MK7) #define ASM_NOP1 K7_NOP1 #define ASM_NOP2 K7_NOP2 #define ASM_NOP3 K7_NOP3 #define ASM_NOP4 K7_NOP4 #define ASM_NOP5 K7_NOP5 #define ASM_NOP6 K7_NOP6 #define ASM_NOP7 K7_NOP7 #define ASM_NOP8 K7_NOP8 #else #define ASM_NOP1 GENERIC_NOP1 #define ASM_NOP2 GENERIC_NOP2 #define ASM_NOP3 GENERIC_NOP3 #define ASM_NOP4 GENERIC_NOP4 #define ASM_NOP5 GENERIC_NOP5 #define ASM_NOP6 GENERIC_NOP6 #define ASM_NOP7 GENERIC_NOP7 #define ASM_NOP8 GENERIC_NOP8 #endif #define ASM_NOP_MAX 8 /* Prefetch instructions for Pentium III and AMD Athlon */ /* It's not worth to care about 3dnow! prefetches for the K6 because they are microcoded there and very slow. However we don't do prefetches for pre XP Athlons currently That should be fixed. */ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *x) { alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM, "r" (x)); } #define ARCH_HAS_PREFETCH #define ARCH_HAS_PREFETCHW #define ARCH_HAS_SPINLOCK_PREFETCH /* 3dnow! prefetch to get an exclusive cache line. Useful for spinlocks to avoid one state transition in the cache coherency protocol. 
*/ static inline void prefetchw(const void *x) { alternative_input(ASM_NOP4, "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define spin_lock_prefetch(x) prefetchw(x) extern void select_idle_routine(const struct cpuinfo_x86 *c); #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; extern void enable_sep_cpu(void); extern int sysenter_setup(void); #ifdef CONFIG_MTRR extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); #else #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) #endif #ifdef CONFIG_X86_MCE extern void mcheck_init(struct cpuinfo_x86 *c); #else #define mcheck_init(c) do {} while(0) #endif #include #define stts() write_cr0(8 | read_cr0()) #endif /* __ASM_I386_PROCESSOR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ptrace.h000066400000000000000000000056741314037446600243230ustar00rootroot00000000000000#ifndef _I386_PTRACE_H #define _I386_PTRACE_H #define EBX 0 #define ECX 1 #define EDX 2 #define ESI 3 #define EDI 4 #define EBP 5 #define EAX 6 #define DS 7 #define ES 8 #define FS 9 #define GS 10 #define ORIG_EAX 11 #define EIP 12 #define CS 13 #define EFL 14 #define UESP 15 #define SS 16 #define FRAME_SIZE 17 /* this struct defines the way the registers are stored on the stack during a system call. */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 #define PTRACE_OLDSETOPTIONS 21 #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_SYSEMU 31 #define PTRACE_SYSEMU_SINGLESTEP 32 enum EFLAGS { EF_CF = 0x00000001, EF_PF = 0x00000004, EF_AF = 0x00000010, EF_ZF = 0x00000040, EF_SF = 0x00000080, EF_TF = 0x00000100, EF_IE = 0x00000200, EF_DF = 0x00000400, EF_OF = 0x00000800, EF_IOPL = 0x00003000, EF_IOPL_RING0 = 0x00000000, EF_IOPL_RING1 = 0x00001000, EF_IOPL_RING2 = 0x00002000, EF_NT = 0x00004000, /* nested task */ EF_RF = 0x00010000, /* resume */ EF_VM = 0x00020000, /* virtual mode */ EF_AC = 0x00040000, /* alignment */ EF_VIF = 0x00080000, /* virtual interrupt */ EF_VIP = 0x00100000, /* virtual interrupt pending */ EF_ID = 0x00200000, /* id */ }; #ifdef __KERNEL__ #include #include /* * ptrace.h gets copied around a lot and abused by UML; define these constants * here as backup versions in case segment.h goes missing */ #ifndef SEGMENT_RPL_MASK #define SEGMENT_RPL_MASK 3 #endif struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); /* * user_mode_vm(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check * if they have already ruled out V8086 mode, so user_mode(regs) can be used. 
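 *
 * Editor's illustration (not part of the original comment): in vm86
 * mode eflags has VM_MASK (bit 17, 0x20000) set, so
 *	((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK))
 * is at least 0x20000 and the ">= 3" test passes regardless of the CS
 * RPL; in protected mode the VM term is 0 and only an RPL of 3 passes.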
*/ static inline int user_mode(struct pt_regs *regs) { return (regs->xcs & SEGMENT_RPL_MASK) == 3; } static inline int user_mode_vm(struct pt_regs *regs) { return (((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= 3); } #define instruction_pointer(regs) ((regs)->eip) extern unsigned long profile_pc(struct pt_regs *regs); #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/resource.h000066400000000000000000000001351314037446600246570ustar00rootroot00000000000000#ifndef _I386_RESOURCE_H #define _I386_RESOURCE_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/rtc.h000066400000000000000000000002121314037446600236140ustar00rootroot00000000000000#ifndef _I386_RTC_H #define _I386_RTC_H /* * x86 uses the default access methods for the RTC. */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/rwlock.h000066400000000000000000000037551314037446600243440ustar00rootroot00000000000000/* include/asm-i386/rwlock.h * * Helpers used by both rw spinlocks and rw semaphores. * * Based in part on code from semaphore.h and * spinlock.h Copyright 1996 Linus Torvalds. * * Copyright 1999 Red Hat, Inc. * * Written by Benjamin LaHaise. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_I386_RWLOCK_H #define _ASM_I386_RWLOCK_H #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ asm volatile(LOCK "subl $1,(%0)\n\t" \ "jns 1f\n" \ "call " helper "\n\t" \ "1:\n" \ ::"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ asm volatile(LOCK "subl $1,%0\n\t" \ "jns 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ "call " helper "\n\t" \ "popl %%eax\n\t" \ "1:\n" \ :"=m" (*(volatile int *)rw) : : "memory") #define __build_read_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_read_lock_const(rw, helper); \ else \ __build_read_lock_ptr(rw, helper); \ } while (0) #define __build_write_lock_ptr(rw, helper) \ asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jz 1f\n" \ "call " helper "\n\t" \ "1:\n" \ ::"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jz 1f\n" \ "pushl %%eax\n\t" \ "leal %0,%%eax\n\t" \ "call " helper "\n\t" \ "popl %%eax\n\t" \ "1:\n" \ :"=m" (*(volatile int *)rw) : : "memory") #define __build_write_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_write_lock_const(rw, helper); \ else \ __build_write_lock_ptr(rw, helper); \ } while (0) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/rwsem.h000066400000000000000000000175131314037446600241750ustar00rootroot00000000000000/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ * * Written by David Howells (dhowells@redhat.com). * * Derived from asm-i386/semaphore.h * * * The MSW of the count is the negated number of active writers and waiting * lockers, and the LSW is the total number of active locks * * The lock count is initialized to 0 (no active and no waiting lockers). * * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an * uncontended lock. This can be determined because XADD returns the old value. 
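 *
 * Editor's worked example (illustrative, using the constants defined
 * below): an uncontended down_write() XADDs
 *	RWSEM_ACTIVE_WRITE_BIAS = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS
 *	                        = -0x00010000 + 0x00000001 = 0xffff0001,
 * so the count goes 0x00000000 -> 0xffff0001 and XADD hands back the
 * old value 0, which tells the caller the lock was free.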
* Readers increment by 1 and see a positive value when uncontended, negative * if there are writers (and maybe) readers waiting (in which case it goes to * sleep). * * The value of WAITING_BIAS supports up to 32766 waiting processes. This can * be extended to 65534 by manually checking the whole MSW rather than relying * on the S flag. * * The value of ACTIVE_BIAS supports up to 65535 active processes. * * This should be totally fair - if anything is waiting, a process that wants a * lock will go to the back of the queue. When the currently active lock is * released, if there's a writer at the front of the queue, then that and only * that will be woken up; if there's a bunch of consequtive readers at the * front, then they'll all be woken up, but no other readers will be. */ #ifndef _I386_RWSEM_H #define _I386_RWSEM_H #ifndef _LINUX_RWSEM_H #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" #endif #ifdef __KERNEL__ #include #include #include struct rwsem_waiter; extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); /* * the semaphore definition */ struct rw_semaphore { signed long count; #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff #define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) spinlock_t wait_lock; struct list_head wait_list; #if RWSEM_DEBUG int debug; #endif }; /* * initialisation */ #if RWSEM_DEBUG #define __RWSEM_DEBUG_INIT , 0 #else #define __RWSEM_DEBUG_INIT /* */ #endif #define __RWSEM_INITIALIZER(name) \ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ __RWSEM_DEBUG_INIT } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) static inline void init_rwsem(struct rw_semaphore *sem) { sem->count = RWSEM_UNLOCKED_VALUE; spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); #if RWSEM_DEBUG sem->debug = 0; #endif } /* * lock for reading */ static inline void __down_read(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning down_read\n\t" LOCK " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ " js 2f\n\t" /* jump if we weren't granted the lock */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " pushl %%edx\n\t" " call rwsem_down_read_failed\n\t" " popl %%edx\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending down_read\n\t" : "=m"(sem->count) : "a"(sem), "m"(sem->count) : "memory", "cc"); } /* * trylock for reading -- returns 1 if successful, 0 if contention */ static inline int __down_read_trylock(struct rw_semaphore *sem) { __s32 result, tmp; __asm__ __volatile__( "# beginning __down_read_trylock\n\t" " movl %0,%1\n\t" "1:\n\t" " movl %1,%2\n\t" " addl %3,%2\n\t" " jle 2f\n\t" LOCK " cmpxchgl %2,%0\n\t" " jnz 1b\n\t" "2:\n\t" "# ending __down_read_trylock\n\t" : "+m"(sem->count), "=&a"(result), "=&r"(tmp) : "i"(RWSEM_ACTIVE_READ_BIAS) : "memory", "cc"); return result>=0 ? 
1 : 0; } /* * lock for writing */ static inline void __down_write(struct rw_semaphore *sem) { int tmp; tmp = RWSEM_ACTIVE_WRITE_BIAS; __asm__ __volatile__( "# beginning down_write\n\t" LOCK " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ " testl %%edx,%%edx\n\t" /* was the count 0 before? */ " jnz 2f\n\t" /* jump if we weren't granted the lock */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " call rwsem_down_write_failed\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending down_write" : "=m"(sem->count), "=d"(tmp) : "a"(sem), "1"(tmp), "m"(sem->count) : "memory", "cc"); } /* * trylock for writing -- returns 1 if successful, 0 if contention */ static inline int __down_write_trylock(struct rw_semaphore *sem) { signed long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); if (ret == RWSEM_UNLOCKED_VALUE) return 1; return 0; } /* * unlock after reading */ static inline void __up_read(struct rw_semaphore *sem) { __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; __asm__ __volatile__( "# beginning __up_read\n\t" LOCK " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ " js 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " decw %%dx\n\t" /* do nothing if still outstanding active readers */ " jnz 1b\n\t" " pushl %%ecx\n\t" " call rwsem_wake\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __up_read\n" : "=m"(sem->count), "=d"(tmp) : "a"(sem), "1"(tmp), "m"(sem->count) : "memory", "cc"); } /* * unlock after writing */ static inline void __up_write(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning __up_write\n\t" " movl %2,%%edx\n\t" LOCK " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ " jnz 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " decw %%dx\n\t" /* did the active count reduce to 0? 
*/ " jnz 1b\n\t" /* jump back if not */ " pushl %%ecx\n\t" " call rwsem_wake\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __up_write\n" : "=m"(sem->count) : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count) : "memory", "cc", "edx"); } /* * downgrade write lock to read lock */ static inline void __downgrade_write(struct rw_semaphore *sem) { __asm__ __volatile__( "# beginning __downgrade_write\n\t" LOCK " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ " js 2f\n\t" /* jump if the lock is being waited upon */ "1:\n\t" LOCK_SECTION_START("") "2:\n\t" " pushl %%ecx\n\t" " pushl %%edx\n\t" " call rwsem_downgrade_wake\n\t" " popl %%edx\n\t" " popl %%ecx\n\t" " jmp 1b\n" LOCK_SECTION_END "# ending __downgrade_write\n" : "=m"(sem->count) : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count) : "memory", "cc"); } /* * implement atomic add functionality */ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) { __asm__ __volatile__( LOCK "addl %1,%0" : "=m"(sem->count) : "ir"(delta), "m"(sem->count)); } /* * implement exchange and add functionality */ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) { int tmp = delta; __asm__ __volatile__( LOCK "xadd %0,(%2)" : "+r"(tmp), "=m"(sem->count) : "r"(sem), "m"(sem->count) : "memory"); return tmp+delta; } static inline int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->count != 0); } #endif /* __KERNEL__ */ #endif /* _I386_RWSEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/scatterlist.h000066400000000000000000000010671314037446600253760ustar00rootroot00000000000000#ifndef _I386_SCATTERLIST_H #define _I386_SCATTERLIST_H struct scatterlist { struct page *page; unsigned int offset; dma_addr_t dma_address; unsigned int length; }; /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg * returns. 
*/ #define sg_dma_address(sg) ((sg)->dma_address) #define sg_dma_len(sg) ((sg)->length) #define ISA_DMA_THRESHOLD (0x00ffffff) #endif /* !(_I386_SCATTERLIST_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/seccomp.h000066400000000000000000000005131314037446600244610ustar00rootroot00000000000000#ifndef _ASM_SECCOMP_H #include #ifdef TIF_32BIT #error "unexpected TIF_32BIT on i386" #endif #include #define __NR_seccomp_read __NR_read #define __NR_seccomp_write __NR_write #define __NR_seccomp_exit __NR_exit #define __NR_seccomp_sigreturn __NR_sigreturn #endif /* _ASM_SECCOMP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/sections.h000066400000000000000000000001761314037446600246640ustar00rootroot00000000000000#ifndef _I386_SECTIONS_H #define _I386_SECTIONS_H /* nothing to see, move along */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/segment.h000066400000000000000000000063771314037446600245100ustar00rootroot00000000000000#ifndef _ASM_SEGMENT_H #define _ASM_SEGMENT_H /* * The layout of the per-CPU GDT under Linux: * * 0 - null * 1 - reserved * 2 - reserved * 3 - reserved * * 4 - unused <==== new cacheline * 5 - unused * * ------- start of TLS (Thread-Local Storage) segments: * * 6 - TLS segment #1 [ glibc's TLS segment ] * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] * 8 - TLS segment #3 * 9 - reserved * 10 - reserved * 11 - reserved * * ------- start of kernel segments: * * 12 - kernel code segment <==== new cacheline * 13 - kernel data segment * 14 - default user CS * 15 - default user DS * 16 - TSS * 17 - LDT * 18 - PNPBIOS support (16->32 gate) * 19 - PNPBIOS support * 20 - PNPBIOS support * 21 - PNPBIOS support * 22 - PNPBIOS support * 23 - APM BIOS support * 24 - APM BIOS support * 25 - APM BIOS support * * 26 - ESPFIX small SS * 27 - unused * 28 - unused * 29 - unused * 30 - unused * 31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_ENTRIES 3 #define GDT_ENTRY_TLS_MIN 6 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #define GDT_ENTRY_DEFAULT_USER_CS 14 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) #define GDT_ENTRY_DEFAULT_USER_DS 15 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) #define GDT_ENTRY_KERNEL_BASE 12 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* * The GDT has 32 entries */ #define GDT_ENTRIES 32 #define GDT_SIZE (GDT_ENTRIES * 8) /* Simple and small GDT entries for booting only */ #define GDT_ENTRY_BOOT_CS 2 #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) /* The PnP BIOS entries in the GDT */ #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) #define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) /* The PnP BIOS selectors */ 
#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number * of tasks we can have.. */ #define IDT_ENTRIES 256 #define SEGMENT_RPL_MASK 0x03 #define SEGMENT_TI_MASK 0x04 #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/semaphore.h000066400000000000000000000120211314037446600250100ustar00rootroot00000000000000#ifndef _I386_SEMAPHORE_H #define _I386_SEMAPHORE_H #include #ifdef __KERNEL__ /* * SMP- and interrupt-safe semaphores.. * * (C) Copyright 1996 Linus Torvalds * * Modified 1996-12-23 by Dave Grothe to fix bugs in * the original code and to make semaphore waits * interruptible so that processes waiting on * semaphores can be killed. * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper * functions in asm/sempahore-helper.h while fixing a * potential and subtle race discovered by Ulrich Schmid * in down_interruptible(). Since I started to play here I * also implemented the `trylock' semaphore operation. * 1999-07-02 Artur Skawina * Optimized "0(ecx)" -> "(ecx)" (the assembler does not * do this). Changed calling sequences from push/jmp to * traditional call/ret. * Modified 2001-01-01 Andreas Franck * Some hacks to ensure compatibility with recent * GCC snapshots, to avoid stack corruption when compiling * with -fomit-frame-pointer. It's not sure if this will * be fixed in GCC, as our previous implementation was a * bit dubious. * * If you would like to see an analysis of this implementation, please * ftp to gcom.com and download the file * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. * */ #include #include #include #include struct semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; #define __SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) static inline void sema_init (struct semaphore *sem, int val) { /* * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. */ atomic_set(&sem->count, val); sem->sleepers = 0; init_waitqueue_head(&sem->wait); } static inline void init_MUTEX (struct semaphore *sem) { sema_init(sem, 1); } static inline void init_MUTEX_LOCKED (struct semaphore *sem) { sema_init(sem, 0); } fastcall void __down_failed(void /* special register calling convention */); fastcall int __down_failed_interruptible(void /* params in registers */); fastcall int __down_failed_trylock(void /* params in registers */); fastcall void __up_wakeup(void /* special register calling convention */); /* * This is ugly, but we want the default case to fall through. * "__down_failed" is a special asm handler that calls the C * routine that actually waits. 
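 *
 * Typical usage of the API below, as an editor's illustrative sketch
 * (foo_sem and the protected data are hypothetical):
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	down(&foo_sem);		may sleep
 *	... touch the protected data ...
 *	up(&foo_sem);
 *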
See arch/i386/kernel/semaphore.c */ static inline void down(struct semaphore * sem) { might_sleep(); __asm__ __volatile__( "# atomic down operation\n\t" LOCK "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %0,%%eax\n\t" "call __down_failed\n\t" "jmp 1b\n" LOCK_SECTION_END :"=m" (sem->count) : :"memory","ax"); } /* * Interruptible try to acquire a semaphore. If we obtained * it, return zero. If we were interrupted, returns -EINTR */ static inline int down_interruptible(struct semaphore * sem) { int result; might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %1,%%eax\n\t" "call __down_failed_interruptible\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "=m" (sem->count) : :"memory"); return result; } /* * Non-blockingly attempt to down() a semaphore. * Returns zero if we acquired it */ static inline int down_trylock(struct semaphore * sem) { int result; __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %1,%%eax\n\t" "call __down_failed_trylock\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "=m" (sem->count) : :"memory"); return result; } /* * Note! This is subtle. We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). * The default case (no contention) will result in NO * jumps for both down() and up(). */ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tlea %0,%%eax\n\t" "call __up_wakeup\n\t" "jmp 1b\n" LOCK_SECTION_END ".subsection 0\n" :"=m" (sem->count) : :"memory","ax"); } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/sembuf.h000066400000000000000000000012711314037446600243130ustar00rootroot00000000000000#ifndef _I386_SEMBUF_H #define _I386_SEMBUF_H /* * The semid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ __kernel_time_t sem_otime; /* last semop time */ unsigned long __unused1; __kernel_time_t sem_ctime; /* last change time */ unsigned long __unused2; unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused3; unsigned long __unused4; }; #endif /* _I386_SEMBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/serial.h000066400000000000000000000020071314037446600243070ustar00rootroot00000000000000/* * include/asm-i386/serial.h */ #include /* * This assumes you have a 1.8432 MHz clock for your UART. * * It'd be nice if someone built a serial card with a 24.576 MHz * clock, since the 16550A is capable of handling a top speed of 1.5 * megabits/second; but this requires the faster clock. 
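 *
 * Editor's note (worked out from the figures above, not part of the
 * original comment): with the 1.8432 MHz crystal and the UART's 16x
 * oversampling, BASE_BAUD below evaluates to 1843200 / 16 = 115200,
 * the familiar top rate of a 16550A without the faster clock.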
*/ #define BASE_BAUD ( 1843200 / 16 ) /* Standard COM flags (except for COM4, because of the 8514 problem) */ #ifdef CONFIG_SERIAL_DETECT_IRQ #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) #else #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF #endif #define SERIAL_PORT_DFNS \ /* UART CLK PORT IRQ FLAGS */ \ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/setup.h000066400000000000000000000047541314037446600242030ustar00rootroot00000000000000/* * Just a place holder. We don't want to have to test x86 before * we include stuff */ #ifndef _i386_SETUP_H #define _i386_SETUP_H #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((x) << PAGE_SHIFT) /* * Reserved space for vmalloc and iomap - defined in asm/page.h */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) #define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 2048 #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F #define OLD_CL_BASE_ADDR 0x90000 #define OLD_CL_OFFSET 0x90022 #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #ifndef __ASSEMBLY__ /* * This is set up by the setup-routine at boot-time */ extern unsigned char boot_params[PARAM_SIZE]; #define PARAM (boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define IST_INFO (*(struct ist_info *) (PARAM+0x60)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) #define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0))) #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) #define INITRD_START (*(unsigned long *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) #define EDID_INFO (*(struct edid_info *) (PARAM+0x140)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #endif /* __ASSEMBLY__ */ #endif /* _i386_SETUP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/shmbuf.h000066400000000000000000000022141314037446600243140ustar00rootroot00000000000000#ifndef _I386_SHMBUF_H #define _I386_SHMBUF_H /* * The 
shmid64_ds structure for i386 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ size_t shm_segsz; /* size of segment (bytes) */ __kernel_time_t shm_atime; /* last attach time */ unsigned long __unused1; __kernel_time_t shm_dtime; /* last detach time */ unsigned long __unused2; __kernel_time_t shm_ctime; /* last change time */ unsigned long __unused3; __kernel_pid_t shm_cpid; /* pid of creator */ __kernel_pid_t shm_lpid; /* pid of last operator */ unsigned long shm_nattch; /* no. of current attaches */ unsigned long __unused4; unsigned long __unused5; }; struct shminfo64 { unsigned long shmmax; unsigned long shmmin; unsigned long shmmni; unsigned long shmseg; unsigned long shmall; unsigned long __unused1; unsigned long __unused2; unsigned long __unused3; unsigned long __unused4; }; #endif /* _I386_SHMBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/shmparam.h000066400000000000000000000002331314037446600246370ustar00rootroot00000000000000#ifndef _ASMI386_SHMPARAM_H #define _ASMI386_SHMPARAM_H #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #endif /* _ASMI386_SHMPARAM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/sigcontext.h000066400000000000000000000037051314037446600252250ustar00rootroot00000000000000#ifndef _ASMi386_SIGCONTEXT_H #define _ASMi386_SIGCONTEXT_H #include /* * As documented in the iBCS2 standard.. * * The first part of "struct _fpstate" is just the normal i387 * hardware setup, the extra "status" word is used to save the * coprocessor status word before entering the handler. * * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 * * The FPU state data structure has had to grow to accommodate the * extended FPU state required by the Streaming SIMD Extensions. * There is no documented standard to accomplish this at the moment. 
*/ struct _fpreg { unsigned short significand[4]; unsigned short exponent; }; struct _fpxreg { unsigned short significand[4]; unsigned short exponent; unsigned short padding[3]; }; struct _xmmreg { unsigned long element[4]; }; struct _fpstate { /* Regular FPU environment */ unsigned long cw; unsigned long sw; unsigned long tag; unsigned long ipoff; unsigned long cssel; unsigned long dataoff; unsigned long datasel; struct _fpreg _st[8]; unsigned short status; unsigned short magic; /* 0xffff = regular FPU data only */ /* FXSR FPU environment */ unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ unsigned long mxcsr; unsigned long reserved; struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ struct _xmmreg _xmm[8]; unsigned long padding[56]; }; #define X86_FXSR_MAGIC 0x0000 struct sigcontext { unsigned short gs, __gsh; unsigned short fs, __fsh; unsigned short es, __esh; unsigned short ds, __dsh; unsigned long edi; unsigned long esi; unsigned long ebp; unsigned long esp; unsigned long ebx; unsigned long edx; unsigned long ecx; unsigned long eax; unsigned long trapno; unsigned long err; unsigned long eip; unsigned short cs, __csh; unsigned long eflags; unsigned long esp_at_signal; unsigned short ss, __ssh; struct _fpstate __user * fpstate; unsigned long oldmask; unsigned long cr2; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/siginfo.h000066400000000000000000000001321314037446600244630ustar00rootroot00000000000000#ifndef _I386_SIGINFO_H #define _I386_SIGINFO_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/signal.h000066400000000000000000000122751314037446600243150ustar00rootroot00000000000000#ifndef _ASMi386_SIGNAL_H #define _ASMi386_SIGNAL_H #include #include #include #include /* Avoid too many header ordering problems. */ struct siginfo; #ifdef __KERNEL__ /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ #define _NSIG 64 #define _NSIG_BPW 32 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) typedef unsigned long old_sigset_t; /* at least 32 bits */ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; #else /* Here we must cater to libcs that poke about in kernel headers. */ #define NSIG 32 typedef unsigned long sigset_t; #endif /* __KERNEL__ */ #define SIGHUP 1 #define SIGINT 2 #define SIGQUIT 3 #define SIGILL 4 #define SIGTRAP 5 #define SIGABRT 6 #define SIGIOT 6 #define SIGBUS 7 #define SIGFPE 8 #define SIGKILL 9 #define SIGUSR1 10 #define SIGSEGV 11 #define SIGUSR2 12 #define SIGPIPE 13 #define SIGALRM 14 #define SIGTERM 15 #define SIGSTKFLT 16 #define SIGCHLD 17 #define SIGCONT 18 #define SIGSTOP 19 #define SIGTSTP 20 #define SIGTTIN 21 #define SIGTTOU 22 #define SIGURG 23 #define SIGXCPU 24 #define SIGXFSZ 25 #define SIGVTALRM 26 #define SIGPROF 27 #define SIGWINCH 28 #define SIGIO 29 #define SIGPOLL SIGIO /* #define SIGLOST 29 */ #define SIGPWR 30 #define SIGSYS 31 #define SIGUNUSED 31 /* These should not be considered constants from userland. */ #define SIGRTMIN 32 #define SIGRTMAX _NSIG /* * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. 
* SA_NODEFER prevents the current signal from being masked in the handler. * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. */ #define SA_NOCLDSTOP 0x00000001u #define SA_NOCLDWAIT 0x00000002u #define SA_SIGINFO 0x00000004u #define SA_ONSTACK 0x08000000u #define SA_RESTART 0x10000000u #define SA_NODEFER 0x40000000u #define SA_RESETHAND 0x80000000u #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND #define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 /* * sigaltstack controls */ #define SS_ONSTACK 1 #define SS_DISABLE 2 #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 #include #ifdef __KERNEL__ struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; unsigned long sa_flags; __sigrestore_t sa_restorer; }; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; struct k_sigaction { struct sigaction sa; }; #else /* Here we must cater to libcs that poke about in kernel headers. */ struct sigaction { union { __sighandler_t _sa_handler; void (*_sa_sigaction)(int, struct siginfo *, void *); } _u; sigset_t sa_mask; unsigned long sa_flags; void (*sa_restorer)(void); }; #define sa_handler _u._sa_handler #define sa_sigaction _u._sa_sigaction #endif /* __KERNEL__ */ typedef struct sigaltstack { void __user *ss_sp; int ss_flags; size_t ss_size; } stack_t; #ifdef __KERNEL__ #include #define __HAVE_ARCH_SIG_BITOPS #define sigaddset(set,sig) \ (__builtin_constant_p(sig) ? \ __const_sigaddset((set),(sig)) : \ __gen_sigaddset((set),(sig))) static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) { __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } static __inline__ void __const_sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); } #define sigdelset(set,sig) \ (__builtin_constant_p(sig) ? \ __const_sigdelset((set),(sig)) : \ __gen_sigdelset((set),(sig))) static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) { __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); } static __inline__ void __const_sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); } static __inline__ int __const_sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); } static __inline__ int __gen_sigismember(sigset_t *set, int _sig) { int ret; __asm__("btl %2,%1\n\tsbbl %0,%0" : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); return ret; } #define sigismember(set,sig) \ (__builtin_constant_p(sig) ? 
\ __const_sigismember((set),(sig)) : \ __gen_sigismember((set),(sig))) static __inline__ int sigfindinword(unsigned long word) { __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); return word; } struct pt_regs; #define ptrace_signal_deliver(regs, cookie) \ do { \ if (current->ptrace & PT_DTRACE) { \ current->ptrace &= ~PT_DTRACE; \ (regs)->eflags &= ~TF_MASK; \ } \ } while (0) #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/smp.h000066400000000000000000000045751314037446600236430ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include #include #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #endif #endif #define BAD_APICID 0xFFu #ifdef CONFIG_SMP #ifndef __ASSEMBLY__ /* * Private routines/data */ extern void smp_alloc_memory(void); extern int pic_mode; extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; extern cpumask_t cpu_core_map[]; extern void (*mtrr_hook) (void); extern void zap_low_mappings (void); extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); #define MAX_APICID 256 extern u8 x86_cpu_to_apicid[]; #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #ifdef CONFIG_HOTPLUG_CPU extern void cpu_exit_clear(void); extern void cpu_uninit(void); #endif /* * This function is needed by all SMP systems. It must _always_ be valid * from the initial startup. We map APIC_BASE very early in page_setup(), * so this is correct in the x86 case. */ #define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_callout_map; extern cpumask_t cpu_callin_map; extern cpumask_t cpu_possible_map; /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { return cpus_weight(cpu_callout_map); } #ifdef CONFIG_X86_LOCAL_APIC #ifdef APIC_DEFINITION extern int hard_smp_processor_id(void); #else #include static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); } #endif static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); #endif /* !__ASSEMBLY__ */ #else /* CONFIG_SMP */ #define cpu_physical_id(cpu) boot_cpu_physical_apicid #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #ifndef __ASSEMBLY__ extern u8 bios_cpu_apicid[]; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/smp_alt.h000066400000000000000000000011451314037446600244710ustar00rootroot00000000000000#ifndef __ASM_SMP_ALT_H__ #define __ASM_SMP_ALT_H__ #include #ifdef CONFIG_SMP #if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE) #define LOCK \ "6677: nop\n" \ ".section __smp_alternatives,\"a\"\n" \ ".long 6677b\n" \ ".long 6678f\n" \ ".previous\n" \ ".section __smp_replacements,\"a\"\n" \ "6678: .byte 1\n" \ ".byte 1\n" \ ".byte 0\n" \ ".byte 1\n" \ ".byte -1\n" \ "lock\n" \ "nop\n" \ ".previous\n" void prepare_for_smp(void); void unprepare_for_smp(void); #else #define LOCK "lock ; " #endif #else #define LOCK "" #endif #endif /* __ASM_SMP_ALT_H__ */ 
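/*
 * Editor's illustrative sketch (not part of the original header): the
 * LOCK macro above is meant to be pasted in front of an instruction
 * inside inline assembly, so a caller typically looks like the
 * hypothetical helper below.
 *
 *	static inline void example_locked_inc(volatile int *v)
 *	{
 *		__asm__ __volatile__(LOCK "incl %0" : "+m" (*v));
 *	}
 *
 * On CONFIG_SMP_ALTERNATIVES kernels the recorded sites let the "lock"
 * prefix be patched in or out at runtime depending on the number of
 * CPUs; on plain SMP builds LOCK is always "lock ; ", and on UP it
 * expands to nothing.
 */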
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/socket.h000066400000000000000000000021541314037446600243230ustar00rootroot00000000000000#ifndef _ASM_SOCKET_H #define _ASM_SOCKET_H #include /* For setsockopt(2) */ #define SOL_SOCKET 1 #define SO_DEBUG 1 #define SO_REUSEADDR 2 #define SO_TYPE 3 #define SO_ERROR 4 #define SO_DONTROUTE 5 #define SO_BROADCAST 6 #define SO_SNDBUF 7 #define SO_RCVBUF 8 #define SO_SNDBUFFORCE 32 #define SO_RCVBUFFORCE 33 #define SO_KEEPALIVE 9 #define SO_OOBINLINE 10 #define SO_NO_CHECK 11 #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 /* To add :#define SO_REUSEPORT 15 */ #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 #define SO_SNDLOWAT 19 #define SO_RCVTIMEO 20 #define SO_SNDTIMEO 21 /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 22 #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 #define SO_SECURITY_ENCRYPTION_NETWORK 24 #define SO_BINDTODEVICE 25 /* Socket filtering */ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 #define SCM_TIMESTAMP SO_TIMESTAMP #define SO_ACCEPTCONN 30 #define SO_PEERSEC 31 #endif /* _ASM_SOCKET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/sockios.h000066400000000000000000000004251314037446600245040ustar00rootroot00000000000000#ifndef __ARCH_I386_SOCKIOS__ #define __ARCH_I386_SOCKIOS__ /* Socket-level I/O control calls. */ #define FIOSETOWN 0x8901 #define SIOCSPGRP 0x8902 #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 #define SIOCGSTAMP 0x8906 /* Get stamp */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/sparsemem.h000066400000000000000000000014221314037446600250240ustar00rootroot00000000000000#ifndef _I386_SPARSEMEM_H #define _I386_SPARSEMEM_H #ifdef CONFIG_SPARSEMEM /* * generic non-linear memory support: * * 1) we will not split memory into more chunks than will fit into the * flags field of the struct page */ /* * SECTION_SIZE_BITS 2^N: how big each section will be * MAX_PHYSADDR_BITS 2^N: how much physical address space we have * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ #ifdef CONFIG_X86_PAE #define SECTION_SIZE_BITS 30 #define MAX_PHYSADDR_BITS 36 #define MAX_PHYSMEM_BITS 36 #else #define SECTION_SIZE_BITS 26 #define MAX_PHYSADDR_BITS 32 #define MAX_PHYSMEM_BITS 32 #endif /* XXX: FIXME -- wli */ #define kern_addr_valid(kaddr) (0) #endif /* CONFIG_SPARSEMEM */ #endif /* _I386_SPARSEMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/spinlock.h000066400000000000000000000114771314037446600246650ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include #include #include #include #include #include #include /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * * We make no fairness assumptions. They have a cost. 
* * (the type definitions are in asm/spinlock_types.h) */ #define __raw_spin_is_locked(x) \ (*(volatile signed char *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ LOCK \ "decb %0\n\t" \ "jns 3f\n" \ "2:\t" \ "rep;nop\n\t" \ "cmpb $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ "3:\n\t" #define __raw_spin_lock_string_flags \ "\n1:\t" \ LOCK \ "decb %0\n\t" \ "jns 4f\n\t" \ "2:\t" \ "testl $0x200, %1\n\t" \ "jz 3f\n\t" \ STI_STRING "\n\t" \ "3:\t" \ "rep;nop\n\t" \ "cmpb $0, %0\n\t" \ "jle 3b\n\t" \ CLI_STRING "\n\t" \ "jmp 1b\n" \ "4:\n\t" static inline void __raw_spin_lock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_lock_string :"=m" (lock->slock) : : "memory"); } static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { __asm__ __volatile__( __raw_spin_lock_string_flags :"=m" (lock->slock) : "r" (flags) : "memory"); } static inline int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval; #ifdef CONFIG_SMP_ALTERNATIVES __asm__ __volatile__( "1:movb %1,%b0\n" "movb $0,%1\n" "2:" ".section __smp_alternatives,\"a\"\n" ".long 1b\n" ".long 3f\n" ".previous\n" ".section __smp_replacements,\"a\"\n" "3: .byte 2b - 1b\n" ".byte 5f-4f\n" ".byte 0\n" ".byte 6f-5f\n" ".byte -1\n" "4: xchgb %b0,%1\n" "5: movb %1,%b0\n" "movb $0,%1\n" "6:\n" ".previous\n" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); #else __asm__ __volatile__( "xchgb %b0,%1\n" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); #endif return oldval > 0; } /* * __raw_spin_unlock based on writing $1 to the low byte. * This method works. Despite all the confusion. * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) * (PPro errata 66, 92) */ #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) #define __raw_spin_unlock_string \ "movb $1,%0" \ :"=m" (lock->slock) : : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_unlock_string ); } #else #define __raw_spin_unlock_string \ "xchgb %b0, %1" \ :"=q" (oldval), "=m" (lock->slock) \ :"0" (oldval) : "memory" static inline void __raw_spin_unlock(raw_spinlock_t *lock) { char oldval = 1; __asm__ __volatile__( __raw_spin_unlock_string ); } #endif #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) /* * Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. * * The inline assembly is non-obvious. Think about it. * * Changed to use the same technique as rw semaphores. See * semaphore.h for details. -ben * * the helpers are in arch/i386/kernel/semaphore.c */ /** * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ #define __raw_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) static inline void __raw_read_lock(raw_rwlock_t *rw) { __build_read_lock(rw, "__read_lock_failed"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { __build_write_lock(rw, "__write_lock_failed"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); if (atomic_read(count) >= 0) return 1; atomic_inc(count); return 0; } static inline int __raw_write_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) return 1; atomic_add(RW_LOCK_BIAS, count); return 0; } static inline void __raw_read_unlock(raw_rwlock_t *rw) { asm volatile(LOCK "incl %0" :"=m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ", %0" : "=m" (rw->lock) : : "memory"); } #define _raw_spin_relax(lock) cpu_relax() #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/spinlock_types.h000066400000000000000000000005741314037446600261050ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_H # error "please don't include this file directly" #endif typedef struct { volatile unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/srat.h000066400000000000000000000023041314037446600240010ustar00rootroot00000000000000/* * Some of the code in this file has been gleaned from the 64 bit * discontigmem support code base. * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* * Send feedback to Pat Gaughen */ #ifndef _ASM_SRAT_H_ #define _ASM_SRAT_H_ #ifndef CONFIG_ACPI_SRAT #error CONFIG_ACPI_SRAT not defined, and srat.h header has been included #endif extern int get_memcfg_from_srat(void); extern unsigned long *get_zholes_size(int); #endif /* _ASM_SRAT_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/stat.h000066400000000000000000000031651314037446600240110ustar00rootroot00000000000000#ifndef _I386_STAT_H #define _I386_STAT_H struct __old_kernel_stat { unsigned short st_dev; unsigned short st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned short st_rdev; unsigned long st_size; unsigned long st_atime; unsigned long st_mtime; unsigned long st_ctime; }; struct stat { unsigned long st_dev; unsigned long st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned long st_rdev; unsigned long st_size; unsigned long st_blksize; unsigned long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long __unused4; unsigned long __unused5; }; /* This matches struct stat64 in glibc2.1, hence the absolutely * insane amounts of padding around dev_t's. */ struct stat64 { unsigned long long st_dev; unsigned char __pad0[4]; #define STAT64_HAS_BROKEN_ST_INO 1 unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned char __pad3[4]; long long st_size; unsigned long st_blksize; unsigned long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long __pad4; /* future possible st_blocks high bits */ unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned int st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; }; #define STAT_HAVE_NSEC 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/statfs.h000066400000000000000000000001271314037446600243350ustar00rootroot00000000000000#ifndef _I386_STATFS_H #define _I386_STATFS_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/string.h000066400000000000000000000256511314037446600243500ustar00rootroot00000000000000#ifndef _I386_STRING_H_ #define _I386_STRING_H_ #ifdef __KERNEL__ #include /* * On a 486 or Pentium, we are better off not using the * byte string operations. But on a 386 or a PPro the * byte string ops are faster than doing it by hand * (MUCH faster on a Pentium). */ /* * This string-include defines all string functions as inline * functions. Use gcc. It also assumes ds=es=data space, this should be * normal. Most of the string-functions are rather heavily hand-optimized, * see especially strsep,strstr,str[c]spn. They should work, but are not * very easy to understand. Everything is done entirely within the register * set, making the functions fast and clean. String instructions have been * used through-out, making for "slightly" unclear code :-) * * NO Copyright (C) 1991, 1992 Linus Torvalds, * consider these trivial functions to be PD. */ /* AK: in fact I bet it would be better to move this stuff all out of line. 
*/ #define __HAVE_ARCH_STRCPY static inline char * strcpy(char * dest,const char *src) { int d0, d1, d2; __asm__ __volatile__( "1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2) :"0" (src),"1" (dest) : "memory"); return dest; } #define __HAVE_ARCH_STRNCPY static inline char * strncpy(char * dest,const char *src,size_t count) { int d0, d1, d2, d3; __asm__ __volatile__( "1:\tdecl %2\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "rep\n\t" "stosb\n" "2:" : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) :"0" (src),"1" (dest),"2" (count) : "memory"); return dest; } #define __HAVE_ARCH_STRCAT static inline char * strcat(char * dest,const char * src) { int d0, d1, d2, d3; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "decl %1\n" "1:\tlodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory"); return dest; } #define __HAVE_ARCH_STRNCAT static inline char * strncat(char * dest,const char * src,size_t count) { int d0, d1, d2, d3; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "decl %1\n\t" "movl %8,%3\n" "1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "stosb\n\t" "testb %%al,%%al\n\t" "jne 1b\n" "2:\txorl %2,%2\n\t" "stosb" : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) : "memory"); return dest; } #define __HAVE_ARCH_STRCMP static inline int strcmp(const char * cs,const char * ct) { int d0, d1; register int __res; __asm__ __volatile__( "1:\tlodsb\n\t" "scasb\n\t" "jne 2f\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "xorl %%eax,%%eax\n\t" "jmp 3f\n" "2:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "3:" :"=a" (__res), "=&S" (d0), "=&D" (d1) :"1" (cs),"2" (ct) :"memory"); return __res; } #define __HAVE_ARCH_STRNCMP static inline int strncmp(const char * cs,const char * ct,size_t count) { register int __res; int d0, d1, d2; __asm__ __volatile__( "1:\tdecl %3\n\t" "js 2f\n\t" "lodsb\n\t" "scasb\n\t" "jne 3f\n\t" "testb %%al,%%al\n\t" "jne 1b\n" "2:\txorl %%eax,%%eax\n\t" "jmp 4f\n" "3:\tsbbl %%eax,%%eax\n\t" "orb $1,%%al\n" "4:" :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) :"1" (cs),"2" (ct),"3" (count) :"memory"); return __res; } #define __HAVE_ARCH_STRCHR static inline char * strchr(const char * s, int c) { int d0; register char * __res; __asm__ __volatile__( "movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "je 2f\n\t" "testb %%al,%%al\n\t" "jne 1b\n\t" "movl $1,%1\n" "2:\tmovl %1,%0\n\t" "decl %0" :"=a" (__res), "=&S" (d0) :"1" (s),"0" (c) :"memory"); return __res; } #define __HAVE_ARCH_STRRCHR static inline char * strrchr(const char * s, int c) { int d0, d1; register char * __res; __asm__ __volatile__( "movb %%al,%%ah\n" "1:\tlodsb\n\t" "cmpb %%ah,%%al\n\t" "jne 2f\n\t" "leal -1(%%esi),%0\n" "2:\ttestb %%al,%%al\n\t" "jne 1b" :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c) :"memory"); return __res; } #define __HAVE_ARCH_STRLEN static inline size_t strlen(const char * s) { int d0; register int __res; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "notl %0\n\t" "decl %0" :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu) :"memory"); return __res; } static __always_inline void * __memcpy(void * to, const void * from, size_t n) { int d0, d1, d2; __asm__ __volatile__( "rep ; movsl\n\t" "movl %4,%%ecx\n\t" "andl $3,%%ecx\n\t" #if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? 
*/ "jz 1f\n\t" #endif "rep ; movsb\n\t" "1:" : "=&c" (d0), "=&D" (d1), "=&S" (d2) : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) : "memory"); return (to); } /* * This looks ugly, but the compiler can optimize it totally, * as the count is constant. */ static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) { long esi, edi; if (!n) return to; #if 1 /* want to do small copies with non-string ops? */ switch (n) { case 1: *(char*)to = *(char*)from; return to; case 2: *(short*)to = *(short*)from; return to; case 4: *(int*)to = *(int*)from; return to; #if 1 /* including those doable with two moves? */ case 3: *(short*)to = *(short*)from; *((char*)to+2) = *((char*)from+2); return to; case 5: *(int*)to = *(int*)from; *((char*)to+4) = *((char*)from+4); return to; case 6: *(int*)to = *(int*)from; *((short*)to+2) = *((short*)from+2); return to; case 8: *(int*)to = *(int*)from; *((int*)to+1) = *((int*)from+1); return to; #endif } #endif esi = (long) from; edi = (long) to; if (n >= 5*4) { /* large block: use rep prefix */ int ecx; __asm__ __volatile__( "rep ; movsl" : "=&c" (ecx), "=&D" (edi), "=&S" (esi) : "0" (n/4), "1" (edi),"2" (esi) : "memory" ); } else { /* small block: don't clobber ecx + smaller code */ if (n >= 4*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 3*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 2*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); if (n >= 1*4) __asm__ __volatile__("movsl" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); } switch (n % 4) { /* tail */ case 0: return to; case 1: __asm__ __volatile__("movsb" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; case 2: __asm__ __volatile__("movsw" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; default: __asm__ __volatile__("movsw\n\tmovsb" :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); return to; } } #define __HAVE_ARCH_MEMCPY #ifdef CONFIG_X86_USE_3DNOW #include /* * This CPU favours 3DNow strongly (eg AMD Athlon) */ static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) { if (len < 512) return __constant_memcpy(to, from, len); return _mmx_memcpy(to, from, len); } static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) { if (len < 512) return __memcpy(to, from, len); return _mmx_memcpy(to, from, len); } #define memcpy(t, f, n) \ (__builtin_constant_p(n) ? \ __constant_memcpy3d((t),(f),(n)) : \ __memcpy3d((t),(f),(n))) #else /* * No 3D Now! */ #define memcpy(t, f, n) \ (__builtin_constant_p(n) ? 
\ __constant_memcpy((t),(f),(n)) : \ __memcpy((t),(f),(n))) #endif #define __HAVE_ARCH_MEMMOVE void *memmove(void * dest,const void * src, size_t n); #define memcmp __builtin_memcmp #define __HAVE_ARCH_MEMCHR static inline void * memchr(const void * cs,int c,size_t count) { int d0; register void * __res; if (!count) return NULL; __asm__ __volatile__( "repne\n\t" "scasb\n\t" "je 1f\n\t" "movl $1,%0\n" "1:\tdecl %0" :"=D" (__res), "=&c" (d0) :"a" (c),"0" (cs),"1" (count) :"memory"); return __res; } static inline void * __memset_generic(void * s, char c,size_t count) { int d0, d1; __asm__ __volatile__( "rep\n\t" "stosb" : "=&c" (d0), "=&D" (d1) :"a" (c),"1" (s),"0" (count) :"memory"); return s; } /* we might want to write optimized versions of these later */ #define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) /* * memset(x,0,y) is a reasonably common thing to do, so we want to fill * things 32 bits at a time even when we don't know the size of the * area at compile-time.. */ static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) { int d0, d1; __asm__ __volatile__( "rep ; stosl\n\t" "testb $2,%b3\n\t" "je 1f\n\t" "stosw\n" "1:\ttestb $1,%b3\n\t" "je 2f\n\t" "stosb\n" "2:" :"=&c" (d0), "=&D" (d1) :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) :"memory"); return (s); } /* Added by Gertjan van Wingerde to make minix and sysv module work */ #define __HAVE_ARCH_STRNLEN static inline size_t strnlen(const char * s, size_t count) { int d0; register int __res; __asm__ __volatile__( "movl %2,%0\n\t" "jmp 2f\n" "1:\tcmpb $0,(%0)\n\t" "je 3f\n\t" "incl %0\n" "2:\tdecl %1\n\t" "cmpl $-1,%1\n\t" "jne 1b\n" "3:\tsubl %2,%0" :"=a" (__res), "=&d" (d0) :"c" (s),"1" (count) :"memory"); return __res; } /* end of additional stuff */ #define __HAVE_ARCH_STRSTR extern char *strstr(const char *cs, const char *ct); /* * This looks horribly ugly, but the compiler can optimize it totally, * as we by now know that both pattern and count is constant.. */ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) { switch (count) { case 0: return s; case 1: *(unsigned char *)s = pattern; return s; case 2: *(unsigned short *)s = pattern; return s; case 3: *(unsigned short *)s = pattern; *(2+(unsigned char *)s) = pattern; return s; case 4: *(unsigned long *)s = pattern; return s; } #define COMMON(x) \ __asm__ __volatile__( \ "rep ; stosl" \ x \ : "=&c" (d0), "=&D" (d1) \ : "a" (pattern),"0" (count/4),"1" ((long) s) \ : "memory") { int d0, d1; switch (count % 4) { case 0: COMMON(""); return s; case 1: COMMON("\n\tstosb"); return s; case 2: COMMON("\n\tstosw"); return s; default: COMMON("\n\tstosw\n\tstosb"); return s; } } #undef COMMON } #define __constant_c_x_memset(s, c, count) \ (__builtin_constant_p(count) ? \ __constant_c_and_count_memset((s),(c),(count)) : \ __constant_c_memset((s),(c),(count))) #define __memset(s, c, count) \ (__builtin_constant_p(count) ? \ __constant_count_memset((s),(c),(count)) : \ __memset_generic((s),(c),(count))) #define __HAVE_ARCH_MEMSET #define memset(s, c, count) \ (__builtin_constant_p(c) ? 
\ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ __memset((s),(c),(count))) /* * find the first occurrence of byte 'c', or 1 past the area if none */ #define __HAVE_ARCH_MEMSCAN static inline void * memscan(void * addr, int c, size_t size) { if (!size) return addr; __asm__("repnz; scasb\n\t" "jnz 1f\n\t" "dec %%edi\n" "1:" : "=D" (addr), "=c" (size) : "0" (addr), "1" (size), "a" (c) : "memory"); return addr; } #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/suspend.h000066400000000000000000000030631314037446600245140ustar00rootroot00000000000000/* * Copyright 2001-2002 Pavel Machek * Based on code * Copyright 2001 Patrick Mochel */ #include #include static inline int arch_prepare_suspend(void) { /* If you want to make non-PSE machine work, turn off paging in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so it could work... */ if (!cpu_has_pse) { printk(KERN_ERR "PSE is required for swsusp.\n"); return -EPERM; } return 0; } /* image of the saved processor state */ struct saved_context { u16 es, fs, gs, ss; unsigned long cr0, cr2, cr3, cr4; u16 gdt_pad; u16 gdt_limit; unsigned long gdt_base; u16 idt_pad; u16 idt_limit; unsigned long idt_base; u16 ldt; u16 tss; unsigned long tr; unsigned long safety; unsigned long return_address; } __attribute__((packed)); #ifdef CONFIG_ACPI_SLEEP extern unsigned long saved_eip; extern unsigned long saved_esp; extern unsigned long saved_ebp; extern unsigned long saved_ebx; extern unsigned long saved_esi; extern unsigned long saved_edi; static inline void acpi_save_register_state(unsigned long return_point) { saved_eip = return_point; asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); } #define acpi_restore_register_state() do {} while (0) /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/system.h000066400000000000000000000411011314037446600243520ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #include #include #include #ifdef __KERNEL__ #define AT_VECTOR_SIZE_ARCH 2 struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); /* * Saving eflags is important. It switches not only IOPL between tasks, * it also protects other tasks from NT leaking through sysenter etc. 
*/ #define switch_to(prev,next,last) do { \ unsigned long esi,edi; \ asm volatile("pushfl\n\t" /* Save flags */ \ "pushl %%ebp\n\t" \ "movl %%esp,%0\n\t" /* save ESP */ \ "movl %5,%%esp\n\t" /* restore ESP */ \ "movl $1f,%1\n\t" /* save EIP */ \ "pushl %6\n\t" /* restore EIP */ \ "jmp __switch_to\n" \ "1:\t" \ "popl %%ebp\n\t" \ "popfl" \ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ "=a" (last),"=S" (esi),"=D" (edi) \ :"m" (next->thread.esp),"m" (next->thread.eip), \ "2" (prev), "d" (next)); \ } while (0) #define _set_base(addr,base) do { unsigned long __pr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %%dl,%2\n\t" \ "movb %%dh,%3" \ :"=&d" (__pr) \ :"m" (*((addr)+2)), \ "m" (*((addr)+4)), \ "m" (*((addr)+7)), \ "0" (base) \ ); } while(0) #define _set_limit(addr,limit) do { unsigned long __lr; \ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ "rorl $16,%%edx\n\t" \ "movb %2,%%dh\n\t" \ "andb $0xf0,%%dh\n\t" \ "orb %%dh,%%dl\n\t" \ "movb %%dl,%2" \ :"=&d" (__lr) \ :"m" (*(addr)), \ "m" (*((addr)+6)), \ "0" (limit) \ ); } while(0) #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "mov %0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "pushl $0\n\t" \ "popl %%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 4\n\t" \ ".long 1b,3b\n" \ ".previous" \ : :"rm" (value)) /* * Save a segment register away */ #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) #endif /* __KERNEL__ */ static inline unsigned long get_limit(unsigned long segment) { unsigned long __limit; __asm__("lsll %1,%0" :"=r" (__limit):"r" (segment)); return __limit+1; } #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((struct __xchg_dummy *)(x)) #ifdef CONFIG_X86_CMPXCHG64 /* * The semantics of XCHGCMP8B are a bit strange, this is why * there is a loop and the loading of %%eax and %%edx has to * be inside. This inlines well in most cases, the cached * cost is around ~38 cycles. (in the future we might want * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that * might have an implicit FPU-save as a cost, so it's not * clear which path to go.) * * cmpxchg8b must be used with the lock prefix here to allow * the instruction to be executed atomically, see page 3-102 * of the instruction set reference 24319102.pdf. We need * the reader side to see the coherent 64bit value. 
*/ static inline void __set_64bit (unsigned long long * ptr, unsigned int low, unsigned int high) { __asm__ __volatile__ ( "\n1:\t" "movl (%0), %%eax\n\t" "movl 4(%0), %%edx\n\t" "lock cmpxchg8b (%0)\n\t" "jnz 1b" : /* no outputs */ : "D"(ptr), "b"(low), "c"(high) : "ax","dx","memory"); } static inline void __set_64bit_constant (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); } #define ll_low(x) *(((unsigned int*)&(x))+0) #define ll_high(x) *(((unsigned int*)&(x))+1) static inline void __set_64bit_var (unsigned long long *ptr, unsigned long long value) { __set_64bit(ptr,ll_low(value), ll_high(value)); } #define set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit_constant(ptr, value) : \ __set_64bit_var(ptr, value) ) #define _set_64bit(ptr,value) \ (__builtin_constant_p(value) ? \ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ __set_64bit(ptr, ll_low(value), ll_high(value)) ) #endif /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #ifdef CONFIG_X86_CMPXCHG #define __HAVE_ARCH_CMPXCHG 1 #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #endif static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK "cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #ifndef CONFIG_X86_CMPXCHG /* * Building a kernel capable running on 80386. It may be necessary to * simulate the cmpxchg on the 80386 CPU. For that purpose we define * a function for each of the sizes we support. 
*/ extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, unsigned long new, int size) { switch (size) { case 1: return cmpxchg_386_u8(ptr, old, new); case 2: return cmpxchg_386_u16(ptr, old, new); case 4: return cmpxchg_386_u32(ptr, old, new); } return old; } #define cmpxchg(ptr,o,n) \ ({ \ __typeof__(*(ptr)) __ret; \ if (likely(boot_cpu_data.x86 > 3)) \ __ret = __cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ else \ __ret = cmpxchg_386((ptr), (unsigned long)(o), \ (unsigned long)(n), sizeof(*(ptr))); \ __ret; \ }) #endif #ifdef CONFIG_X86_CMPXCHG64 static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, unsigned long long new) { unsigned long long prev; __asm__ __volatile__(LOCK "cmpxchg8b %3" : "=A"(prev) : "b"((unsigned long)new), "c"((unsigned long)(new >> 32)), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } #define cmpxchg64(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ (unsigned long long)(n))) #endif #ifdef __KERNEL__ struct alt_instr { __u8 *instr; /* original instruction */ __u8 *replacement; __u8 cpuid; /* cpuid bit set for replacement */ __u8 instrlen; /* length of original instruction */ __u8 replacementlen; /* length of new instruction, <= instrlen */ __u8 pad; }; #endif /* * Alternative instructions for different CPU types or capabilities. * * This allows to use optimized instructions even on generic binary * kernels. * * length of oldinstr must be longer or equal the length of newinstr * It can be padded with nops as needed. * * For non barrier like inlines please define new variants * without volatile and memory clobber. */ #define alternative(oldinstr, newinstr, feature) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. * * Pecularities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... "r") * If you use variable sized constraints like "m" or "g" in the * replacement maake sure to pad to the worst case length. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 4\n" \ " .long 661b\n" /* label */ \ " .long 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature), ##input) /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. * * For now, "wmb()" doesn't actually do anything, as all * Intel CPU's follow what Intel calls a *Processor Order*, * in which all writes are seen in the program order even * outside the CPU. 
* * I expect future Intel CPU's to have a weaker ordering, * but I'd also expect them to finally get their act together * and add some real memory barriers if so. * * Some non intel clones support out of order store. wmb() ceases to be a * nop for these. */ /* * Actually only lfence would be needed for mb() because all stores done * by the kernel should be already ordered. But keep a full barrier for now. */ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) /** * read_barrier_depends - Flush all pending reads that subsequents reads * depend on. * * No data-dependent reads from memory-like regions are ever reordered * over this barrier. All reads preceding this primitive are guaranteed * to access memory (but not necessarily other CPUs' caches) before any * reads following this primitive that depend on the data return by * any of the preceding reads. This primitive is much lighter weight than * rmb() on most CPUs, and is never heavier weight than is * rmb(). * * These ordering constraints are respected by both the local CPU * and the compiler. * * Ordering is not guaranteed by anything other than these primitives, * not even by data dependencies. See the documentation for * memory_barrier() for examples and URLs to more information. * * For example, the following code would force ordering (the initial * value of "a" is zero, "b" is one, and "p" is "&a"): * * * CPU 0 CPU 1 * * b = 2; * memory_barrier(); * p = &b; q = p; * read_barrier_depends(); * d = *q; * * * because the read of "*q" depends on the read of "p" and these * two reads are separated by a read_barrier_depends(). However, * the following code, with the same initial values for "a" and "b": * * * CPU 0 CPU 1 * * a = 2; * memory_barrier(); * b = 3; y = b; * read_barrier_depends(); * x = a; * * * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() * in cases like thiswhere there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) #ifdef CONFIG_X86_OOSTORE /* Actually there are no OOO store capable CPUs for now that do SSE, but make it already an possibility. 
*/ #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) #else #define wmb() __asm__ __volatile__ ("": : :"memory") #endif #ifdef CONFIG_SMP #if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE) #define smp_alt_mb(instr) \ __asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \ ".section __smp_alternatives,\"a\"\n" \ ".long 6667b\n" \ ".long 6673f\n" \ ".previous\n" \ ".section __smp_replacements,\"a\"\n" \ "6673:.byte 6668b-6667b\n" \ ".byte 6670f-6669f\n" \ ".byte 6671f-6670f\n" \ ".byte 0\n" \ ".byte %c0\n" \ "6669:lock;addl $0,0(%%esp)\n" \ "6670:" instr "\n" \ "6671:\n" \ ".previous\n" \ : \ : "i" (X86_FEATURE_XMM2) \ : "memory") #define smp_rmb() smp_alt_mb("lfence") #define smp_mb() smp_alt_mb("mfence") #define set_mb(var, value) do { \ unsigned long __set_mb_temp; \ __asm__ __volatile__("6667:movl %1, %0\n6668:\n" \ ".section __smp_alternatives,\"a\"\n" \ ".long 6667b\n" \ ".long 6673f\n" \ ".previous\n" \ ".section __smp_replacements,\"a\"\n" \ "6673: .byte 6668b-6667b\n" \ ".byte 6670f-6669f\n" \ ".byte 0\n" \ ".byte 6671f-6670f\n" \ ".byte -1\n" \ "6669: xchg %1, %0\n" \ "6670:movl %1, %0\n" \ "6671:\n" \ ".previous\n" \ : "=m" (var), "=r" (__set_mb_temp) \ : "1" (value) \ : "memory"); } while (0) #else #define smp_mb() mb() #define smp_rmb() rmb() #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #endif #define smp_wmb() wmb() #define smp_read_barrier_depends() read_barrier_depends() #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do { } while(0) #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif #define set_wmb(var, value) do { var = value; wmb(); } while (0) #define local_irq_save(x) do { typecheck(unsigned long,x); local_save_flags(x); local_irq_disable(); } while (0) #define irqs_disabled() \ ({ \ unsigned long flags; \ local_save_flags(flags); \ !(flags & (1<<9)); \ }) /* * disable hlt during certain critical i/o operations */ #define HAVE_DISABLE_HLT void disable_hlt(void); void enable_hlt(void); extern int es7000_plat; void cpu_idle_wait(void); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible: */ static inline void sched_cacheflush(void) { wbinvd(); } extern unsigned long arch_align_stack(unsigned long sp); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/termbits.h000066400000000000000000000073511314037446600246700ustar00rootroot00000000000000#ifndef __ARCH_I386_TERMBITS_H__ #define __ARCH_I386_TERMBITS_H__ #include typedef unsigned char cc_t; typedef unsigned int speed_t; typedef unsigned int tcflag_t; #define NCCS 19 struct termios { tcflag_t c_iflag; /* input mode flags */ tcflag_t c_oflag; /* output mode flags */ tcflag_t c_cflag; /* control mode flags */ tcflag_t c_lflag; /* local mode flags */ cc_t c_line; /* line discipline */ cc_t c_cc[NCCS]; /* control characters */ }; /* c_cc characters */ #define VINTR 0 #define VQUIT 1 #define VERASE 2 #define VKILL 3 #define VEOF 4 #define VTIME 5 #define VMIN 6 #define VSWTC 7 #define VSTART 8 #define VSTOP 9 #define VSUSP 10 #define VEOL 11 #define VREPRINT 12 #define VDISCARD 13 #define VWERASE 14 #define VLNEXT 15 #define VEOL2 16 /* c_iflag bits */ #define IGNBRK 0000001 #define BRKINT 0000002 #define IGNPAR 0000004 #define PARMRK 0000010 #define INPCK 0000020 #define ISTRIP 0000040 #define INLCR 0000100 #define IGNCR 0000200 #define ICRNL 0000400 
#define IUCLC 0001000 #define IXON 0002000 #define IXANY 0004000 #define IXOFF 0010000 #define IMAXBEL 0020000 #define IUTF8 0040000 /* c_oflag bits */ #define OPOST 0000001 #define OLCUC 0000002 #define ONLCR 0000004 #define OCRNL 0000010 #define ONOCR 0000020 #define ONLRET 0000040 #define OFILL 0000100 #define OFDEL 0000200 #define NLDLY 0000400 #define NL0 0000000 #define NL1 0000400 #define CRDLY 0003000 #define CR0 0000000 #define CR1 0001000 #define CR2 0002000 #define CR3 0003000 #define TABDLY 0014000 #define TAB0 0000000 #define TAB1 0004000 #define TAB2 0010000 #define TAB3 0014000 #define XTABS 0014000 #define BSDLY 0020000 #define BS0 0000000 #define BS1 0020000 #define VTDLY 0040000 #define VT0 0000000 #define VT1 0040000 #define FFDLY 0100000 #define FF0 0000000 #define FF1 0100000 /* c_cflag bit meaning */ #define CBAUD 0010017 #define B0 0000000 /* hang up */ #define B50 0000001 #define B75 0000002 #define B110 0000003 #define B134 0000004 #define B150 0000005 #define B200 0000006 #define B300 0000007 #define B600 0000010 #define B1200 0000011 #define B1800 0000012 #define B2400 0000013 #define B4800 0000014 #define B9600 0000015 #define B19200 0000016 #define B38400 0000017 #define EXTA B19200 #define EXTB B38400 #define CSIZE 0000060 #define CS5 0000000 #define CS6 0000020 #define CS7 0000040 #define CS8 0000060 #define CSTOPB 0000100 #define CREAD 0000200 #define PARENB 0000400 #define PARODD 0001000 #define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 #define B460800 0010004 #define B500000 0010005 #define B576000 0010006 #define B921600 0010007 #define B1000000 0010010 #define B1152000 0010011 #define B1500000 0010012 #define B2000000 0010013 #define B2500000 0010014 #define B3000000 0010015 #define B3500000 0010016 #define B4000000 0010017 #define CIBAUD 002003600000 /* input baud rate (not used) */ #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ /* c_lflag bits */ #define ISIG 0000001 #define ICANON 0000002 #define XCASE 0000004 #define ECHO 0000010 #define ECHOE 0000020 #define ECHOK 0000040 #define ECHONL 0000100 #define NOFLSH 0000200 #define TOSTOP 0000400 #define ECHOCTL 0001000 #define ECHOPRT 0002000 #define ECHOKE 0004000 #define FLUSHO 0010000 #define PENDIN 0040000 #define IEXTEN 0100000 /* tcflow() and TCXONC use these */ #define TCOOFF 0 #define TCOON 1 #define TCIOFF 2 #define TCION 3 /* tcflush() and TCFLSH use these */ #define TCIFLUSH 0 #define TCOFLUSH 1 #define TCIOFLUSH 2 /* tcsetattr uses these */ #define TCSANOW 0 #define TCSADRAIN 1 #define TCSAFLUSH 2 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/termios.h000066400000000000000000000061531314037446600245200ustar00rootroot00000000000000#ifndef _I386_TERMIOS_H #define _I386_TERMIOS_H #include #include struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; }; #define NCC 8 struct termio { unsigned short c_iflag; /* input mode flags */ unsigned short c_oflag; /* output mode flags */ unsigned short c_cflag; /* control mode flags */ unsigned short c_lflag; /* local mode flags */ unsigned char c_line; /* line discipline */ unsigned char c_cc[NCC]; /* control characters */ }; /* modem lines */ #define TIOCM_LE 0x001 #define TIOCM_DTR 0x002 #define TIOCM_RTS 0x004 #define TIOCM_ST 0x008 #define TIOCM_SR 0x010 #define TIOCM_CTS 0x020 #define TIOCM_CAR 0x040 #define TIOCM_RNG 
0x080 #define TIOCM_DSR 0x100 #define TIOCM_CD TIOCM_CAR #define TIOCM_RI TIOCM_RNG #define TIOCM_OUT1 0x2000 #define TIOCM_OUT2 0x4000 #define TIOCM_LOOP 0x8000 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 #define N_MOUSE 2 #define N_PPP 3 #define N_STRIP 4 #define N_AX25 5 #define N_X25 6 /* X.25 async */ #define N_6PACK 7 #define N_MASC 8 /* Reserved for Mobitex module */ #define N_R3964 9 /* Reserved for Simatic R3964 module */ #define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ #define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ #define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ #define N_HDLC 13 /* synchronous HDLC */ #define N_SYNC_PPP 14 /* synchronous PPP */ #define N_HCI 15 /* Bluetooth HCI UART */ #ifdef __KERNEL__ #include /* intr=^C quit=^\ erase=del kill=^U eof=^D vtime=\0 vmin=\1 sxtc=\0 start=^Q stop=^S susp=^Z eol=\0 reprint=^R discard=^U werase=^W lnext=^V eol2=\0 */ #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" /* * Translate a "termio" structure into a "termios". Ugh. */ #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ unsigned short __tmp; \ get_user(__tmp,&(termio)->x); \ *(unsigned short *) &(termios)->x = __tmp; \ } #define user_termio_to_kernel_termios(termios, termio) \ ({ \ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ }) /* * Translate a "termios" structure into a "termio". Ugh. */ #define kernel_termios_to_user_termio(termio, termios) \ ({ \ put_user((termios)->c_iflag, &(termio)->c_iflag); \ put_user((termios)->c_oflag, &(termio)->c_oflag); \ put_user((termios)->c_cflag, &(termio)->c_cflag); \ put_user((termios)->c_lflag, &(termio)->c_lflag); \ put_user((termios)->c_line, &(termio)->c_line); \ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ }) #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) #endif /* __KERNEL__ */ #endif /* _I386_TERMIOS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/thread_info.h000066400000000000000000000120531314037446600253140ustar00rootroot00000000000000/* thread_info.h: i386 low-level thread information * * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds and Dave Miller */ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H #ifdef __KERNEL__ #include #include #include #ifndef __ASSEMBLY__ #include #endif /* * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages * - if the contents of this structure are changed, the assembly constants must also be changed */ #ifndef __ASSEMBLY__ struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ unsigned long flags; /* low level flags */ unsigned long status; /* thread-synchronous flags */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ struct restart_block restart_block; unsigned 
long previous_esp; /* ESP of the previous stack in case of nested (IRQ) stacks */ __u8 supervisor_stack[0]; }; #else /* !__ASSEMBLY__ */ #include #endif #define PREEMPT_ACTIVE 0x10000000 #ifdef CONFIG_4KSTACKS #define THREAD_SIZE (4096) #else #define THREAD_SIZE (8192) #endif #define STACK_WARN (THREAD_SIZE/8) /* * macros/functions for gaining access to the thread information structure * * preempt_count needs to be 1 initially, until the scheduler is functional. */ #ifndef __ASSEMBLY__ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) /* how to get the thread information struct from C */ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); return ti; } /* how to get the current stack pointer from C */ register unsigned long current_stack_pointer asm("esp") __attribute_used__; /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE #define alloc_thread_info(tsk) \ ({ \ struct thread_info *ret; \ \ ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \ if (ret) \ memset(ret, 0, THREAD_SIZE); \ ret; \ }) #else #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) #endif #define free_thread_info(info) kfree(info) #else /* !__ASSEMBLY__ */ /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg) \ movl $-THREAD_SIZE, reg; \ andl %esp, reg /* use this one if reg already contains %esp */ #define GET_THREAD_INFO_WITH_ESP(reg) \ andl $-THREAD_SIZE, reg #endif /* * thread information flags * - these are process state flags that various assembly files may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_IRET 5 /* return with iret */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 17 #define _TIF_SYSCALL_TRACE (1< #include /** * struct timer_ops - used to define a timer source * * @name: name of the timer. * @init: Probes and initializes the timer. Takes clock= override * string as an argument. Returns 0 on success, anything else * on failure. * @mark_offset: called by the timer interrupt. * @get_offset: called by gettimeofday(). Returns the number of microseconds * since the last timer interupt. * @monotonic_clock: returns the number of nanoseconds since the init of the * timer. * @delay: delays this many clock cycles. 
*/ struct timer_opts { char* name; void (*mark_offset)(void); unsigned long (*get_offset)(void); unsigned long long (*monotonic_clock)(void); void (*delay)(unsigned long); unsigned long (*read_timer)(void); int (*suspend)(pm_message_t state); int (*resume)(void); }; struct init_timer_opts { int (*init)(char *override); struct timer_opts *opts; }; #define TICK_SIZE (tick_nsec / 1000) extern struct timer_opts* __init select_timer(void); extern void clock_fallback(void); void setup_pit_timer(void); void init_xtime_from_cmos(void); extern int no_sync_cmos_timer; /* Modifiers for buggy PIT handling */ extern int pit_latch_buggy; extern struct timer_opts *cur_timer; extern int timer_ack; extern int use_sched_clock_cycles; /* list of externed timers */ extern struct timer_opts timer_none; extern struct timer_opts timer_pit; extern struct init_timer_opts timer_pit_init; extern struct init_timer_opts timer_tsc_init; #ifdef CONFIG_X86_CYCLONE_TIMER extern struct init_timer_opts timer_cyclone_init; #endif extern unsigned long calibrate_tsc(void); extern unsigned long read_timer_tsc(void); extern void init_cpu_khz(void); extern int recalibrate_cpu_khz(void); #ifdef CONFIG_HPET_TIMER extern struct init_timer_opts timer_hpet_init; extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr); #endif #ifdef CONFIG_X86_PM_TIMER extern struct init_timer_opts timer_pmtmr_init; #endif #ifdef CONFIG_X86_VMI extern struct init_timer_opts timer_vmi_init; void setup_vmi_timer(void); #endif static inline void setup_system_timer(void) { #ifdef CONFIG_X86_VMI setup_vmi_timer(); #else setup_pit_timer(); #endif } /* convert from cycles(64bits) => nanoseconds (64bits) * basic equation: * ns = cycles / (freq / ns_per_sec) * ns = cycles * (ns_per_sec / freq) * ns = cycles * (10^9 / (cpu_mhz * 10^6)) * ns = cycles * (10^3 / cpu_mhz) * * Then we use scaling math (suggested by george@mvista.com) to get: * ns = cycles * (10^3 * SC / cpu_mhz) / SC * ns = cycles * cyc2ns_scale / SC * * And since SC is a constant power of two, we can convert the div * into a shift. * -johnstul@us.ibm.com "math is hard, lets go shopping!" */ extern unsigned long cyc2ns_scale; extern unsigned long cyc2us_scale; #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ #define CYC2US_SCALE_FACTOR 20 extern void set_cyc_scales(unsigned long cpu_mhz); static inline unsigned long long cycles_2_ns(unsigned long long cyc) { return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } static inline unsigned long long cycles_2_us(unsigned long long cyc) { return (cyc * cyc2us_scale) >> CYC2US_SCALE_FACTOR; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/timex.h000066400000000000000000000024371314037446600241650ustar00rootroot00000000000000/* * linux/include/asm-i386/timex.h * * i386 architecture timex specifications */ #ifndef _ASMi386_TIMEX_H #define _ASMi386_TIMEX_H #include #include #ifdef CONFIG_X86_ELAN # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ #else # define CLOCK_TICK_RATE 1193182 /* Underlying HZ */ #endif /* * Standard way to access the cycle counter on i586+ CPUs. * Currently only used on SMP. * * If you really have a SMP machine with i486 chips or older, * compile for that, and this will just always return zero. * That's ok, it just means that the nicer scheduling heuristics * won't work for you. * * We only use the low 32 bits, and we'd simply better make sure * that we reschedule before that wraps. 
Scheduling at least every * four billion cycles just basically sounds like a good idea, * regardless of how fast the machine is. */ typedef unsigned long long cycles_t; static inline cycles_t get_cycles (void) { unsigned long long ret=0; #ifndef CONFIG_X86_TSC if (!cpu_has_tsc) return 0; #endif #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) rdtscll(ret); #endif return ret; } extern unsigned int cpu_khz; extern int read_current_timer(unsigned long *timer_value); #define ARCH_HAS_READ_CURRENT_TIMER 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/tlb.h000066400000000000000000000006531314037446600236160ustar00rootroot00000000000000#ifndef _I386_TLB_H #define _I386_TLB_H /* * x86 doesn't need any special per-pte or * per-vma handling.. */ #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) /* * .. because we flush the whole mm when it * fills up. */ #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/tlbflush.h000066400000000000000000000052411314037446600246560ustar00rootroot00000000000000#ifndef _I386_TLBFLUSH_H #define _I386_TLBFLUSH_H #include #include #include #include extern unsigned long pgkern_mask; # define __flush_tlb_all() \ do { \ if (cpu_has_pge) \ __flush_tlb_global(); \ else \ __flush_tlb(); \ } while (0) #define cpu_has_invlpg (boot_cpu_data.x86 > 3) #ifdef CONFIG_X86_INVLPG # define __flush_tlb_one(addr) __flush_tlb_single(addr) #else # define __flush_tlb_one(addr) \ do { \ if (cpu_has_invlpg) \ __flush_tlb_single(addr); \ else \ __flush_tlb(); \ } while (0) #endif /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. 
*/ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() extern void flush_tlb_all(void); extern void flush_tlb_current_task(void); extern void flush_tlb_mm(struct mm_struct *); extern void flush_tlb_page(struct vm_area_struct *, unsigned long); #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 struct tlb_state { struct mm_struct *active_mm; int state; char __cacheline_padding[L1_CACHE_BYTES-8]; }; DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* i386 does not keep any page table caches in TLB */ } #endif /* _I386_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/topology.h000066400000000000000000000064101314037446600247060ustar00rootroot00000000000000/* * linux/include/asm-i386/topology.h * * Written by: Matthew Dobson, IBM Corporation * * Copyright (C) 2002, IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to */ #ifndef _ASM_I386_TOPOLOGY_H #define _ASM_I386_TOPOLOGY_H #ifdef CONFIG_X86_HT #define topology_physical_package_id(cpu) \ (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) #define topology_core_id(cpu) \ (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu]) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif #ifdef CONFIG_NUMA #include #include /* Mappings between logical cpu number and node number */ extern cpumask_t node_2_cpu_mask[]; extern int cpu_2_node[]; /* Returns the number of the node containing CPU 'cpu' */ static inline int cpu_to_node(int cpu) { return cpu_2_node[cpu]; } /* Returns the number of the node containing Node 'node'. This architecture is flat, so it is a pretty simple function! */ #define parent_node(node) (node) /* Returns a bitmask of CPUs on Node 'node'. */ static inline cpumask_t node_to_cpumask(int node) { return node_2_cpu_mask[node]; } /* Returns the number of the first CPU on Node 'node'. 
*/ static inline int node_to_first_cpu(int node) { cpumask_t mask = node_to_cpumask(node); return first_cpu(mask); } #define pcibus_to_node(bus) ((long) (bus)->sysdata) #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) /* sched_domains SD_NODE_INIT for NUMAQ machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ .parent = NULL, \ .groups = NULL, \ .min_interval = 8, \ .max_interval = 32, \ .busy_factor = 32, \ .imbalance_pct = 125, \ .cache_nice_tries = 1, \ .busy_idx = 3, \ .idle_idx = 1, \ .newidle_idx = 2, \ .wake_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_EXEC \ | SD_BALANCE_FORK \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ } extern unsigned long node_start_pfn[]; extern unsigned long node_end_pfn[]; extern unsigned long node_remap_size[]; #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) #else /* !CONFIG_NUMA */ /* * Other i386 platforms should define their own version of the * above macros here. */ #include #endif /* CONFIG_NUMA */ extern cpumask_t cpu_coregroup_map(int cpu); #endif /* _ASM_I386_TOPOLOGY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/types.h000066400000000000000000000023421314037446600241760ustar00rootroot00000000000000#ifndef _I386_TYPES_H #define _I386_TYPES_H #ifndef __ASSEMBLY__ typedef unsigned short umode_t; /* * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the * header files exported to user space */ typedef __signed__ char __s8; typedef unsigned char __u8; typedef __signed__ short __s16; typedef unsigned short __u16; typedef __signed__ int __s32; typedef unsigned int __u32; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) typedef __signed__ long long __s64; typedef unsigned long long __u64; #endif #endif /* __ASSEMBLY__ */ /* * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ #define BITS_PER_LONG 32 #ifndef __ASSEMBLY__ #include typedef signed char s8; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef signed long long s64; typedef unsigned long long u64; /* DMA addresses come in generic and 64-bit flavours. */ #ifdef CONFIG_HIGHMEM64G typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; #endif typedef u64 dma64_addr_t; #ifdef CONFIG_LBD typedef u64 sector_t; #define HAVE_SECTOR_T #endif #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/uaccess.h000066400000000000000000000414231314037446600244630ustar00rootroot00000000000000#ifndef __i386_UACCESS_H #define __i386_UACCESS_H /* * User space memory access functions */ #include #include #include #include #include #include #define VERIFY_READ 0 #define VERIFY_WRITE 1 /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with * get_fs() == KERNEL_DS, checking is bypassed. * * For historical reasons, these macros are grossly misnamed. 
*/ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a,b) ((a).seg == (b).seg) /* * movsl can be slow when source and dest are not both 8-byte aligned */ #ifdef CONFIG_X86_INTEL_USERCOPY extern struct movsl_mask { int mask; } ____cacheline_aligned_in_smp movsl_mask; #endif #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: * (u33)addr + (u33)size >= (u33)current->addr_limit.seg * * This needs 33-bit arithmetic. We have a carry... */ #define __range_ok(addr,size) ({ \ unsigned long flag,sum; \ __chk_user_ptr(addr); \ asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ :"=&r" (flag), "=r" (sum) \ :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \ flag; }) /** * access_ok: - Checks if a user space pointer is valid * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe * to write to a block, it is always safe to read from it. * @addr: User space pointer to start of block to check * @size: Size of block to check * * Context: User context only. This function may sleep. * * Checks if a pointer to a block of memory in user space is valid. * * Returns true (nonzero) if the memory block may be valid, false (zero) * if it is definitely invalid. * * Note that, depending on architecture, this function probably just * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is * the address at which the program should continue. No registers are * modified, so it is entirely up to the continuation code to figure out * what to do. * * All the routines below use bits of fixup code that are out of line * with the main instruction path. This means when everything is well, * we don't even have to jump over them. Further, they do not intrude * on our cache or tlb entries. */ struct exception_table_entry { unsigned long insn, fixup; }; extern int fixup_exception(struct pt_regs *regs); /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * * This gets kind of ugly. We want to return _two_ values in "get_user()" * and yet we don't want to do any pointers, because that is too much * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. * * The "__xxx" versions of the user access functions are versions that * do not verify the address space, that must have been done previously * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). 
*/ extern void __get_user_1(void); extern void __get_user_2(void); extern void __get_user_4(void); #define __get_user_x(size,ret,x,ptr) \ __asm__ __volatile__("call __get_user_" #size \ :"=a" (ret),"=d" (x) \ :"0" (ptr)) /* Careful: we have to cast the result to the type of the pointer for sign reasons */ /** * get_user: - Get a simple variable from user space. * @x: Variable to store result. * @ptr: Source address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and the result of * dereferencing @ptr must be assignable to @x without a cast. * * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ #define get_user(x,ptr) \ ({ int __ret_gu; \ unsigned long __val_gu; \ __chk_user_ptr(ptr); \ switch(sizeof (*(ptr))) { \ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ } \ (x) = (__typeof__(*(ptr)))__val_gu; \ __ret_gu; \ }) extern void __put_user_bad(void); /* * Strange magic calling convention: pointer in %ecx, * value in %eax(:%edx), return value in %eax, no clobbers. */ extern void __put_user_1(void); extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); #define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) #define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) /** * put_user: - Write a simple value into user space. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and @x must be assignable * to the result of dereferencing @ptr. * * Returns zero on success, or -EFAULT on error. */ #ifdef CONFIG_X86_WP_WORKS_OK #define put_user(x,ptr) \ ({ int __ret_pu; \ __chk_user_ptr(ptr); \ switch(sizeof(*(ptr))) { \ case 1: __put_user_1(x, ptr); break; \ case 2: __put_user_2(x, ptr); break; \ case 4: __put_user_4(x, ptr); break; \ case 8: __put_user_8(x, ptr); break; \ default:__put_user_X(x, ptr); break; \ } \ __ret_pu; \ }) #else #define put_user(x,ptr) \ ({ \ int __ret_pu; \ __typeof__(*(ptr)) __pus_tmp = x; \ __ret_pu=0; \ if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ sizeof(*(ptr))) != 0)) \ __ret_pu=-EFAULT; \ __ret_pu; \ }) #endif /** * __get_user: - Get a simple variable from user space, with less checking. * @x: Variable to store result. * @ptr: Source address, in user space. * * Context: User context only. This function may sleep. 
* * This macro copies a single simple variable from user space to kernel * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and the result of * dereferencing @ptr must be assignable to @x without a cast. * * Caller must check the pointer with access_ok() before calling this * function. * * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ #define __get_user(x,ptr) \ __get_user_nocheck((x),(ptr),sizeof(*(ptr))) /** * __put_user: - Write a simple value into user space, with less checking. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * * Context: User context only. This function may sleep. * * This macro copies a single simple value from kernel space to user * space. It supports simple types like char and int, but not larger * data types like structures or arrays. * * @ptr must have pointer-to-simple-variable type, and @x must be assignable * to the result of dereferencing @ptr. * * Caller must check the pointer with access_ok() before calling this * function. * * Returns zero on success, or -EFAULT on error. */ #define __put_user(x,ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __put_user_nocheck(x,ptr,size) \ ({ \ long __pu_err; \ __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ __pu_err; \ }) #define __put_user_u64(x, addr, err) \ __asm__ __volatile__( \ "1: movl %%eax,0(%2)\n" \ "2: movl %%edx,4(%2)\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,4b\n" \ " .long 2b,4b\n" \ ".previous" \ : "=r"(err) \ : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) #ifdef CONFIG_X86_WP_WORKS_OK #define __put_user_size(x,ptr,size,retval,errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ default: __put_user_bad(); \ } \ } while (0) #else #define __put_user_size(x,ptr,size,retval,errret) \ do { \ __typeof__(*(ptr)) __pus_tmp = x; \ retval = 0; \ \ if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ retval = errret; \ } while (0) #endif struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) /* * Tell gcc we read from memory instead of writing: this is because * we do not write to any memory gcc knows about, so there are no * aliasing issues. 
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ "1: mov"itype" %"rtype"1,%2\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,3b\n" \ ".previous" \ : "=r"(err) \ : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) #define __get_user_nocheck(x,ptr,size) \ ({ \ long __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); #define __get_user_size(x,ptr,size,retval,errret) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ default: (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ __asm__ __volatile__( \ "1: mov"itype" %2,%"rtype"1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %3,%0\n" \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,3b\n" \ ".previous" \ : "=r"(err), ltype (x) \ : "m"(__m(addr)), "i"(errret), "0"(err)) unsigned long __must_check __copy_to_user_ll(void __user *to, const void *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nozero(void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll(void *to, const void __user *from, unsigned long n); /* * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault * we return the initial request size (1, 2 or 4), as copy_*_user should do. * If a store crosses a page boundary and gets a fault, the x86 will not write * anything, so this is accurate. */ /** * __copy_to_user: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. * @from: Source address, in kernel space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from kernel space to user space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be copied. * On success, this will be zero. */ static __always_inline unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); return ret; case 2: __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); return ret; case 4: __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); return ret; } } return __copy_to_user_ll(to, from, n); } static __always_inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) { might_sleep(); return __copy_to_user_inatomic(to, from, n); } /** * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. * @from: Source address, in user space. * @n: Number of bytes to copy. * * Context: User context only. This function may sleep. * * Copy data from user space to kernel space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be copied. * On success, this will be zero. 
* * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. * * An alternate version - __copy_from_user_inatomic() - may be called from * atomic context and will fail rather than sleep. In this case the * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h * for explanation of why this is needed. */ static __always_inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { /* Avoid zeroing the tail if the copy fails.. * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, * but as the zeroing behaviour is only significant when n is not * constant, that shouldn't be a problem. */ if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } return __copy_from_user_ll_nozero(to, from, n); } static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_sleep(); if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } return __copy_from_user_ll(to, from, n); } unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n); unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n); long __must_check strncpy_from_user(char *dst, const char __user *src, long count); long __must_check __strncpy_from_user(char *dst, const char __user *src, long count); /** * strlen_user: - Get the size of a string in user space. * @str: The string to measure. * * Context: User context only. This function may sleep. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * On exception, returns 0. * * If there is a limit on the length of a valid string, you may wish to * consider using strnlen_user() instead. */ #define strlen_user(str) strnlen_user(str, ~0UL >> 1) long strnlen_user(const char __user *str, long n); unsigned long __must_check clear_user(void __user *mem, unsigned long len); unsigned long __must_check __clear_user(void __user *mem, unsigned long len); #endif /* __i386_UACCESS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/ucontext.h000066400000000000000000000004301314037446600246770ustar00rootroot00000000000000#ifndef _ASMi386_UCONTEXT_H #define _ASMi386_UCONTEXT_H struct ucontext { unsigned long uc_flags; struct ucontext *uc_link; stack_t uc_stack; struct sigcontext uc_mcontext; sigset_t uc_sigmask; /* mask last for extensibility */ }; #endif /* !_ASMi386_UCONTEXT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/unaligned.h000066400000000000000000000022471314037446600250040ustar00rootroot00000000000000#ifndef __I386_UNALIGNED_H #define __I386_UNALIGNED_H /* * The i386 can do unaligned accesses itself. * * The strange macros are there to make sure these can't * be misused in a way that makes them not work on other * architectures where unaligned accesses aren't as simple. 
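 *
 * Illustrative sketch, not part of the original header: fetching and
 * storing a 16-bit value at an arbitrary byte offset in a buffer
 * (buf and off are hypothetical):
 *
 *	u16 v = get_unaligned((u16 *)(buf + off));
 *	put_unaligned(v + 1, (u16 *)(buf + off));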
*/ /** * get_unaligned - get value from possibly mis-aligned location * @ptr: pointer to value * * This macro should be used for accessing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. retrieving a u16 value from a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define get_unaligned(ptr) (*(ptr)) /** * put_unaligned - put value to a possibly mis-aligned location * @val: value to place * @ptr: pointer to location * * This macro should be used for placing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. writing a u16 value to a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/unistd.h000066400000000000000000000336011314037446600243420ustar00rootroot00000000000000#ifndef _ASM_I386_UNISTD_H_ #define _ASM_I386_UNISTD_H_ /* * This file contains the system call numbers. */ #define __NR_restart_syscall 0 #define __NR_exit 1 #define __NR_fork 2 #define __NR_read 3 #define __NR_write 4 #define __NR_open 5 #define __NR_close 6 #define __NR_waitpid 7 #define __NR_creat 8 #define __NR_link 9 #define __NR_unlink 10 #define __NR_execve 11 #define __NR_chdir 12 #define __NR_time 13 #define __NR_mknod 14 #define __NR_chmod 15 #define __NR_lchown 16 #define __NR_break 17 #define __NR_oldstat 18 #define __NR_lseek 19 #define __NR_getpid 20 #define __NR_mount 21 #define __NR_umount 22 #define __NR_setuid 23 #define __NR_getuid 24 #define __NR_stime 25 #define __NR_ptrace 26 #define __NR_alarm 27 #define __NR_oldfstat 28 #define __NR_pause 29 #define __NR_utime 30 #define __NR_stty 31 #define __NR_gtty 32 #define __NR_access 33 #define __NR_nice 34 #define __NR_ftime 35 #define __NR_sync 36 #define __NR_kill 37 #define __NR_rename 38 #define __NR_mkdir 39 #define __NR_rmdir 40 #define __NR_dup 41 #define __NR_pipe 42 #define __NR_times 43 #define __NR_prof 44 #define __NR_brk 45 #define __NR_setgid 46 #define __NR_getgid 47 #define __NR_signal 48 #define __NR_geteuid 49 #define __NR_getegid 50 #define __NR_acct 51 #define __NR_umount2 52 #define __NR_lock 53 #define __NR_ioctl 54 #define __NR_fcntl 55 #define __NR_mpx 56 #define __NR_setpgid 57 #define __NR_ulimit 58 #define __NR_oldolduname 59 #define __NR_umask 60 #define __NR_chroot 61 #define __NR_ustat 62 #define __NR_dup2 63 #define __NR_getppid 64 #define __NR_getpgrp 65 #define __NR_setsid 66 #define __NR_sigaction 67 #define __NR_sgetmask 68 #define __NR_ssetmask 69 #define __NR_setreuid 70 #define __NR_setregid 71 #define __NR_sigsuspend 72 #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 #define __NR_getgroups 80 #define __NR_setgroups 81 #define __NR_select 82 #define __NR_symlink 83 #define __NR_oldlstat 84 #define __NR_readlink 85 #define __NR_uselib 86 #define __NR_swapon 87 #define __NR_reboot 88 #define __NR_readdir 89 #define __NR_mmap 90 #define __NR_munmap 91 #define __NR_truncate 92 #define __NR_ftruncate 93 #define __NR_fchmod 94 #define __NR_fchown 95 #define __NR_getpriority 96 #define __NR_setpriority 97 #define __NR_profil 98 #define __NR_statfs 99 #define __NR_fstatfs 100 #define __NR_ioperm 101 
#define __NR_socketcall 102 #define __NR_syslog 103 #define __NR_setitimer 104 #define __NR_getitimer 105 #define __NR_stat 106 #define __NR_lstat 107 #define __NR_fstat 108 #define __NR_olduname 109 #define __NR_iopl 110 #define __NR_vhangup 111 #define __NR_idle 112 #define __NR_vm86old 113 #define __NR_wait4 114 #define __NR_swapoff 115 #define __NR_sysinfo 116 #define __NR_ipc 117 #define __NR_fsync 118 #define __NR_sigreturn 119 #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 #define __NR_modify_ldt 123 #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 #define __NR_create_module 127 #define __NR_init_module 128 #define __NR_delete_module 129 #define __NR_get_kernel_syms 130 #define __NR_quotactl 131 #define __NR_getpgid 132 #define __NR_fchdir 133 #define __NR_bdflush 134 #define __NR_sysfs 135 #define __NR_personality 136 #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ #define __NR_setfsuid 138 #define __NR_setfsgid 139 #define __NR__llseek 140 #define __NR_getdents 141 #define __NR__newselect 142 #define __NR_flock 143 #define __NR_msync 144 #define __NR_readv 145 #define __NR_writev 146 #define __NR_getsid 147 #define __NR_fdatasync 148 #define __NR__sysctl 149 #define __NR_mlock 150 #define __NR_munlock 151 #define __NR_mlockall 152 #define __NR_munlockall 153 #define __NR_sched_setparam 154 #define __NR_sched_getparam 155 #define __NR_sched_setscheduler 156 #define __NR_sched_getscheduler 157 #define __NR_sched_yield 158 #define __NR_sched_get_priority_max 159 #define __NR_sched_get_priority_min 160 #define __NR_sched_rr_get_interval 161 #define __NR_nanosleep 162 #define __NR_mremap 163 #define __NR_setresuid 164 #define __NR_getresuid 165 #define __NR_vm86 166 #define __NR_query_module 167 #define __NR_poll 168 #define __NR_nfsservctl 169 #define __NR_setresgid 170 #define __NR_getresgid 171 #define __NR_prctl 172 #define __NR_rt_sigreturn 173 #define __NR_rt_sigaction 174 #define __NR_rt_sigprocmask 175 #define __NR_rt_sigpending 176 #define __NR_rt_sigtimedwait 177 #define __NR_rt_sigqueueinfo 178 #define __NR_rt_sigsuspend 179 #define __NR_pread64 180 #define __NR_pwrite64 181 #define __NR_chown 182 #define __NR_getcwd 183 #define __NR_capget 184 #define __NR_capset 185 #define __NR_sigaltstack 186 #define __NR_sendfile 187 #define __NR_getpmsg 188 /* some people actually want streams */ #define __NR_putpmsg 189 /* some people actually want streams */ #define __NR_vfork 190 #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ #define __NR_mmap2 192 #define __NR_truncate64 193 #define __NR_ftruncate64 194 #define __NR_stat64 195 #define __NR_lstat64 196 #define __NR_fstat64 197 #define __NR_lchown32 198 #define __NR_getuid32 199 #define __NR_getgid32 200 #define __NR_geteuid32 201 #define __NR_getegid32 202 #define __NR_setreuid32 203 #define __NR_setregid32 204 #define __NR_getgroups32 205 #define __NR_setgroups32 206 #define __NR_fchown32 207 #define __NR_setresuid32 208 #define __NR_getresuid32 209 #define __NR_setresgid32 210 #define __NR_getresgid32 211 #define __NR_chown32 212 #define __NR_setuid32 213 #define __NR_setgid32 214 #define __NR_setfsuid32 215 #define __NR_setfsgid32 216 #define __NR_pivot_root 217 #define __NR_mincore 218 #define __NR_madvise 219 #define __NR_madvise1 219 /* delete when C lib stub is removed */ #define __NR_getdents64 220 #define __NR_fcntl64 221 /* 223 is unused */ #define __NR_gettid 224 #define __NR_readahead 225 #define __NR_setxattr 226 #define 
__NR_lsetxattr 227 #define __NR_fsetxattr 228 #define __NR_getxattr 229 #define __NR_lgetxattr 230 #define __NR_fgetxattr 231 #define __NR_listxattr 232 #define __NR_llistxattr 233 #define __NR_flistxattr 234 #define __NR_removexattr 235 #define __NR_lremovexattr 236 #define __NR_fremovexattr 237 #define __NR_tkill 238 #define __NR_sendfile64 239 #define __NR_futex 240 #define __NR_sched_setaffinity 241 #define __NR_sched_getaffinity 242 #define __NR_set_thread_area 243 #define __NR_get_thread_area 244 #define __NR_io_setup 245 #define __NR_io_destroy 246 #define __NR_io_getevents 247 #define __NR_io_submit 248 #define __NR_io_cancel 249 #define __NR_fadvise64 250 /* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ #define __NR_exit_group 252 #define __NR_lookup_dcookie 253 #define __NR_epoll_create 254 #define __NR_epoll_ctl 255 #define __NR_epoll_wait 256 #define __NR_remap_file_pages 257 #define __NR_set_tid_address 258 #define __NR_timer_create 259 #define __NR_timer_settime (__NR_timer_create+1) #define __NR_timer_gettime (__NR_timer_create+2) #define __NR_timer_getoverrun (__NR_timer_create+3) #define __NR_timer_delete (__NR_timer_create+4) #define __NR_clock_settime (__NR_timer_create+5) #define __NR_clock_gettime (__NR_timer_create+6) #define __NR_clock_getres (__NR_timer_create+7) #define __NR_clock_nanosleep (__NR_timer_create+8) #define __NR_statfs64 268 #define __NR_fstatfs64 269 #define __NR_tgkill 270 #define __NR_utimes 271 #define __NR_fadvise64_64 272 #define __NR_vserver 273 #define __NR_mbind 274 #define __NR_get_mempolicy 275 #define __NR_set_mempolicy 276 #define __NR_mq_open 277 #define __NR_mq_unlink (__NR_mq_open+1) #define __NR_mq_timedsend (__NR_mq_open+2) #define __NR_mq_timedreceive (__NR_mq_open+3) #define __NR_mq_notify (__NR_mq_open+4) #define __NR_mq_getsetattr (__NR_mq_open+5) #define __NR_sys_kexec_load 283 #define __NR_waitid 284 /* #define __NR_sys_setaltroot 285 */ #define __NR_add_key 286 #define __NR_request_key 287 #define __NR_keyctl 288 #define __NR_ioprio_set 289 #define __NR_ioprio_get 290 #define __NR_inotify_init 291 #define __NR_inotify_add_watch 292 #define __NR_inotify_rm_watch 293 #define __NR_migrate_pages 294 #define __NR_openat 295 #define __NR_mkdirat 296 #define __NR_mknodat 297 #define __NR_fchownat 298 #define __NR_futimesat 299 #define __NR_fstatat64 300 #define __NR_unlinkat 301 #define __NR_renameat 302 #define __NR_linkat 303 #define __NR_symlinkat 304 #define __NR_readlinkat 305 #define __NR_fchmodat 306 #define __NR_faccessat 307 #define __NR_pselect6 308 #define __NR_ppoll 309 #define __NR_unshare 310 #define NR_syscalls 311 /* * user-visible error numbers are in the range -1 - -128: see * */ #define __syscall_return(type, res) \ do { \ if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \ errno = -(res); \ res = -1; \ } \ return (type) (res); \ } while (0) /* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. 
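 *
 * Illustrative sketch, not part of the original header: the _syscallN()
 * macros below emit raw "int $0x80" stubs, e.g.
 *
 *	static _syscall1(int, close, int, fd)
 *
 * expands to a close() function that loads __NR_close into %eax and fd
 * into %ebx, traps into the kernel, and passes the raw result through
 * __syscall_return() above so that errors end up in errno.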
*/ #define _syscall0(type,name) \ type name(void) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name)); \ __syscall_return(type,__res); \ } #define _syscall1(type,name,type1,arg1) \ type name(type1 arg1) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall2(type,name,type1,arg1,type2,arg2) \ type name(type1 arg1,type2 arg2) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ type name(type1 arg1,type2 arg2,type3 arg3) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ { \ long __res; \ __asm__ volatile ("int $0x80" \ : "=a" (__res) \ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \ __syscall_return(type,__res); \ } #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ { \ long __res; \ __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \ : "=a" (__res) \ : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ "0" ((long)(arg6)) : "memory"); \ __syscall_return(type,__res); \ } #ifdef __KERNEL__ #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND #endif #ifdef __KERNEL_SYSCALLS__ #include #include #include #include /* * we need this inline - forking from kernel space will result * in NO COPY ON WRITE (!!!), until an execve is executed. This * is no problem, but for the stack. This is handled by not letting * main() use the stack at all after fork(). Thus, no function * calls - which means inline code for fork too, as otherwise we * would use the stack upon exit from 'fork()'. 
* * Actually only pause and fork are needed inline, so that there * won't be any messing with the stack from main(), but we define * some others too. */ static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount); asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); asmlinkage int sys_execve(struct pt_regs regs); asmlinkage int sys_clone(struct pt_regs regs); asmlinkage int sys_fork(struct pt_regs regs); asmlinkage int sys_vfork(struct pt_regs regs); asmlinkage int sys_pipe(unsigned long __user *fildes); asmlinkage long sys_iopl(unsigned long unused); struct sigaction; asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, struct sigaction __user *oact, size_t sigsetsize); #endif /* * "Conditional" syscalls * * What we want is __attribute__((weak,alias("sys_ni_syscall"))), * but it doesn't work on all toolchains, so we just do it by hand */ #ifndef cond_syscall #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") #endif #endif /* _ASM_I386_UNISTD_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/user.h000066400000000000000000000114371314037446600240150ustar00rootroot00000000000000#ifndef _I386_USER_H #define _I386_USER_H #include /* Core file format: The core file is written in such a way that gdb can understand it and provide useful information to the user (under linux we use the 'trad-core' bfd). There are quite a number of obstacles to being able to view the contents of the floating point registers, and until these are solved you will not be able to view the contents of them. Actually, you can read in the core file and look at the contents of the user struct to find out what the floating point registers contain. The actual file contents are as follows: UPAGE: 1 page consisting of a user struct that tells gdb what is present in the file. Directly after this is a copy of the task_struct, which is currently not used by gdb, but it may come in useful at some point. All of the registers are stored as part of the upage. The upage should always be only one page. DATA: The data area is stored. We use current->end_text to current->brk to pick up all of the user variables, plus any memory that may have been malloced. No attempt is made to determine if a page is demand-zero or if a page is totally unused, we just cover the entire range. All of the addresses are rounded in such a way that an integral number of pages is written. STACK: We need the stack information in order to get a meaningful backtrace. We need to write the data from (esp) to current->start_stack, so we round each of these off in order to be able to write an integer number of pages. The minimum core file size is 3 pages, or 12288 bytes. */ /* * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 * * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for * interacting with the FXSR-format floating point environment. Floating * point data can be accessed in the regular format in the usual manner, * and both the standard and SIMD floating point data can be accessed via * the new ptrace requests. In either case, changes to the FPU environment * will be reflected in the task's state as expected. 
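 *
 * Illustrative sketch, not part of the original header: a debugger
 * would typically fetch the FXSR state of a stopped tracee (pid is
 * hypothetical) into the structure defined below with
 *
 *	struct user_fxsr_struct fpx;
 *
 *	ptrace(PTRACE_GETFPXREGS, pid, NULL, &fpx);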
*/ struct user_i387_struct { long cwd; long swd; long twd; long fip; long fcs; long foo; long fos; long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ }; struct user_fxsr_struct { unsigned short cwd; unsigned short swd; unsigned short twd; unsigned short fop; long fip; long fcs; long foo; long fos; long mxcsr; long reserved; long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ long padding[56]; }; /* * This is the old layout of "struct pt_regs", and * is still the layout used by user mode (the new * pt_regs doesn't have all registers as the kernel * doesn't use the extra segment registers) */ struct user_regs_struct { long ebx, ecx, edx, esi, edi, ebp, eax; unsigned short ds, __ds, es, __es; unsigned short fs, __fs, gs, __gs; long orig_eax, eip; unsigned short cs, __cs; long eflags, esp; unsigned short ss, __ss; }; /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments are within the file, and what virtual addresses to use. */ struct user{ /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ /* for this mess. Not yet used. */ struct user_i387_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ unsigned long int u_tsize; /* Text segment size (pages). */ unsigned long int u_dsize; /* Data segment size (pages). */ unsigned long int u_ssize; /* Stack segment size (pages). */ unsigned long start_code; /* Starting virtual address of text. */ unsigned long start_stack; /* Starting virtual address of stack area. This is actually the bottom of the stack, the top of the stack is always found in the esp register. */ long int signal; /* Signal that caused the core dump. */ int reserved; /* No longer used */ struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; }; #define NBPG PAGE_SIZE #define UPAGES 1 #define HOST_TEXT_START_ADDR (u.start_code) #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _I386_USER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/vga.h000066400000000000000000000005711314037446600236110ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/vic.h000066400000000000000000000036211314037446600236140ustar00rootroot00000000000000/* Copyright (C) 1999,2001 * * Author: J.E.J.Bottomley@HansenPartnership.com * * Standard include definitions for the NCR Voyager Interrupt Controller */ /* The eight CPI vectors. To activate a CPI, you write a bit mask * corresponding to the processor set to be interrupted into the * relevant register. 
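 * (Illustrative example, not from the original source: writing the mask
 * 1 << 2 to VIC_CPI_Registers[0], e.g. outb(1 << 2, VIC_CPI_Registers[0]),
 * would target CPU 2 with CPI 0.)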
That set of CPUs will then be interrupted with * the CPI */ static const int VIC_CPI_Registers[] = {0xFC00, 0xFC01, 0xFC08, 0xFC09, 0xFC10, 0xFC11, 0xFC18, 0xFC19 }; #define VIC_PROC_WHO_AM_I 0xfc29 # define QUAD_IDENTIFIER 0xC0 # define EIGHT_SLOT_IDENTIFIER 0xE0 #define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72 #define VIC_CPI_BASE_REGISTER 0xFC41 #define VIC_PROCESSOR_ID 0xFC21 # define VIC_CPU_MASQUERADE_ENABLE 0x8 #define VIC_CLAIM_REGISTER_0 0xFC38 #define VIC_CLAIM_REGISTER_1 0xFC39 #define VIC_REDIRECT_REGISTER_0 0xFC60 #define VIC_REDIRECT_REGISTER_1 0xFC61 #define VIC_PRIORITY_REGISTER 0xFC20 #define VIC_PRIMARY_MC_BASE 0xFC48 #define VIC_SECONDARY_MC_BASE 0xFC49 #define QIC_PROCESSOR_ID 0xFC71 # define QIC_CPUID_ENABLE 0x08 #define QIC_VIC_CPI_BASE_REGISTER 0xFC79 #define QIC_CPI_BASE_REGISTER 0xFC7A #define QIC_MASK_REGISTER0 0xFC80 /* NOTE: these are masked high, enabled low */ # define QIC_PERF_TIMER 0x01 # define QIC_LPE 0x02 # define QIC_SYS_INT 0x04 # define QIC_CMN_INT 0x08 /* at the moment, just enable CMN_INT, disable SYS_INT */ # define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */)) #define QIC_MASK_REGISTER1 0xFC81 # define QIC_BOOT_CPI_MASK 0xFE /* Enable CPI's 1-6 inclusive */ # define QIC_CPI_ENABLE 0x81 #define QIC_INTERRUPT_CLEAR0 0xFC8A #define QIC_INTERRUPT_CLEAR1 0xFC8B /* this is where we place the CPI vectors */ #define VIC_DEFAULT_CPI_BASE 0xC0 /* this is where we place the QIC CPI vectors */ #define QIC_DEFAULT_CPI_BASE 0xD0 #define VIC_BOOT_INTERRUPT_MASK 0xfe extern void smp_vic_timer_interrupt(struct pt_regs *regs); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/vm86.h000066400000000000000000000134421314037446600236350ustar00rootroot00000000000000#ifndef _LINUX_VM86_H #define _LINUX_VM86_H /* * I'm guessing at the VIF/VIP flag usage, but hope that this is how * the Pentium uses them. Linux will return from vm86 mode when both * VIF and VIP is set. * * On a Pentium, we could probably optimize the virtual flags directly * in the eflags register instead of doing it "by hand" in vflags... * * Linus */ #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #ifdef CONFIG_VM86 #define VM_MASK 0x00020000 #else #define VM_MASK 0 /* ignored */ #endif #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ #define VIP_MASK 0x00100000 /* virtual interrupt pending */ #define ID_MASK 0x00200000 #define BIOSSEG 0x0f000 #define CPU_086 0 #define CPU_186 1 #define CPU_286 2 #define CPU_386 3 #define CPU_486 4 #define CPU_586 5 /* * Return values for the 'vm86()' system call */ #define VM86_TYPE(retval) ((retval) & 0xff) #define VM86_ARG(retval) ((retval) >> 8) #define VM86_SIGNAL 0 /* return due to signal */ #define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ #define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ /* * Additional return values when invoking new vm86() */ #define VM86_PICRETURN 4 /* return due to pending PIC request */ #define VM86_TRAP 6 /* return due to DOS-debugger request */ /* * function codes when invoking new vm86() */ #define VM86_PLUS_INSTALL_CHECK 0 #define VM86_ENTER 1 #define VM86_ENTER_NO_BYPASS 2 #define VM86_REQUEST_IRQ 3 #define VM86_FREE_IRQ 4 #define VM86_GET_IRQ_BITS 5 #define VM86_GET_AND_RESET_IRQ 6 /* * This is the stack-layout seen by the user space program when we have * done a translation of "SAVE_ALL" from vm86 mode. 
The real kernel layout * is 'kernel_vm86_regs' (see below). */ struct vm86_regs { /* * normal regs, with special meaning for the segment descriptors.. */ long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; long __null_ds; long __null_es; long __null_fs; long __null_gs; long orig_eax; long eip; unsigned short cs, __csh; long eflags; long esp; unsigned short ss, __ssh; /* * these are specific to v86 mode: */ unsigned short es, __esh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; }; struct revectored_struct { unsigned long __map[8]; /* 256 bits */ }; struct vm86_struct { struct vm86_regs regs; unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; }; /* * flags masks */ #define VM86_SCREEN_BITMAP 0x0001 struct vm86plus_info_struct { unsigned long force_return_for_pic:1; unsigned long vm86dbg_active:1; /* for debugger */ unsigned long vm86dbg_TFpendig:1; /* for debugger */ unsigned long unused:28; unsigned long is_vm86pus:1; /* for vm86 internal use */ unsigned char vm86dbg_intxxtab[32]; /* for debugger */ }; struct vm86plus_struct { struct vm86_regs regs; unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; }; #ifdef __KERNEL__ /* * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 * mode - the main change is that the old segment descriptors aren't * useful any more and are forced to be zero by the kernel (and the * hardware when a trap occurs), and the real segment descriptors are * at the end of the structure. Look at ptrace.h to see the "normal" * setup. For user space layout see 'struct vm86_regs' above. */ struct kernel_vm86_regs { /* * normal regs, with special meaning for the segment descriptors.. */ long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; long __null_ds; long __null_es; long orig_eax; long eip; unsigned short cs, __csh; long eflags; long esp; unsigned short ss, __ssh; /* * these are specific to v86 mode: */ unsigned short es, __esh; unsigned short ds, __dsh; unsigned short fs, __fsh; unsigned short gs, __gsh; }; struct kernel_vm86_struct { struct kernel_vm86_regs regs; /* * the below part remains on the kernel stack while we are in VM86 mode. * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above * 'struct kernel_vm86_regs' with the then actual values. * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' * in kernelspace, hence we need not reget the data from userspace. */ #define VM86_TSS_ESP0 flags unsigned long flags; unsigned long screen_bitmap; unsigned long cpu_type; struct revectored_struct int_revectored; struct revectored_struct int21_revectored; struct vm86plus_info_struct vm86plus; struct pt_regs *regs32; /* here we save the pointer to the old regs */ /* * The below is not part of the structure, but the stack layout continues * this way. In front of 'return-eip' may be some data, depending on * compilation, so we don't rely on this and save the pointer to 'oldregs' * in 'regs32' above. 
* However, with GCC-2.7.2 and the current CFLAGS you see exactly this: long return-eip; from call to vm86() struct pt_regs oldregs; user space registers as saved by syscall */ }; #ifdef CONFIG_VM86 void handle_vm86_fault(struct kernel_vm86_regs *, long); int handle_vm86_trap(struct kernel_vm86_regs *, long, int); struct task_struct; void release_vm86_irqs(struct task_struct *); #else #define handle_vm86_fault(a, b) #define release_vm86_irqs(a) static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { return 0; } #endif /* CONFIG_VM86 */ #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/voyager.h000066400000000000000000000414561314037446600245170ustar00rootroot00000000000000/* Copyright (C) 1999,2001 * * Author: J.E.J.Bottomley@HansenPartnership.com * * Standard include definitions for the NCR Voyager system */ #undef VOYAGER_DEBUG #undef VOYAGER_CAT_DEBUG #ifdef VOYAGER_DEBUG #define VDEBUG(x) printk x #else #define VDEBUG(x) #endif /* There are three levels of voyager machine: 3,4 and 5. The rule is * if it's less than 3435 it's a Level 3 except for a 3360 which is * a level 4. A 3435 or above is a Level 5 */ #define VOYAGER_LEVEL5_AND_ABOVE 0x3435 #define VOYAGER_LEVEL4 0x3360 /* The L4 DINO ASIC */ #define VOYAGER_DINO 0x43 /* voyager ports in standard I/O space */ #define VOYAGER_MC_SETUP 0x96 #define VOYAGER_CAT_CONFIG_PORT 0x97 # define VOYAGER_CAT_DESELECT 0xff #define VOYAGER_SSPB_RELOCATION_PORT 0x98 /* Valid CAT controller commands */ /* start instruction register cycle */ #define VOYAGER_CAT_IRCYC 0x01 /* start data register cycle */ #define VOYAGER_CAT_DRCYC 0x02 /* move to execute state */ #define VOYAGER_CAT_RUN 0x0F /* end operation */ #define VOYAGER_CAT_END 0x80 /* hold in idle state */ #define VOYAGER_CAT_HOLD 0x90 /* single step an "intest" vector */ #define VOYAGER_CAT_STEP 0xE0 /* return cat controller to CLEMSON mode */ #define VOYAGER_CAT_CLEMSON 0xFF /* the default cat command header */ #define VOYAGER_CAT_HEADER 0x7F /* the range of possible CAT module ids in the system */ #define VOYAGER_MIN_MODULE 0x10 #define VOYAGER_MAX_MODULE 0x1f /* The voyager registers per asic */ #define VOYAGER_ASIC_ID_REG 0x00 #define VOYAGER_ASIC_TYPE_REG 0x01 /* the sub address registers can be made auto incrementing on reads */ #define VOYAGER_AUTO_INC_REG 0x02 # define VOYAGER_AUTO_INC 0x04 # define VOYAGER_NO_AUTO_INC 0xfb #define VOYAGER_SUBADDRDATA 0x03 #define VOYAGER_SCANPATH 0x05 # define VOYAGER_CONNECT_ASIC 0x01 # define VOYAGER_DISCONNECT_ASIC 0xfe #define VOYAGER_SUBADDRLO 0x06 #define VOYAGER_SUBADDRHI 0x07 #define VOYAGER_SUBMODSELECT 0x08 #define VOYAGER_SUBMODPRESENT 0x09 #define VOYAGER_SUBADDR_LO 0xff #define VOYAGER_SUBADDR_HI 0xffff /* the maximum size of a scan path -- used to form instructions */ #define VOYAGER_MAX_SCAN_PATH 0x100 /* the biggest possible register size (in bytes) */ #define VOYAGER_MAX_REG_SIZE 4 /* Total number of possible modules (including submodules) */ #define VOYAGER_MAX_MODULES 16 /* Largest number of asics per module */ #define VOYAGER_MAX_ASICS_PER_MODULE 7 /* the CAT asic of each module is always the first one */ #define VOYAGER_CAT_ID 0 #define VOYAGER_PSI 0x1a /* voyager instruction operations and registers */ #define VOYAGER_READ_CONFIG 0x1 #define VOYAGER_WRITE_CONFIG 0x2 #define VOYAGER_BYPASS 0xff typedef struct voyager_asic { __u8 asic_addr; /* ASIC address; Level 4 */ __u8 asic_type; /* ASIC type */ __u8 asic_id; /* ASIC id */ __u8 jtag_id[4]; /* JTAG id */ __u8 
asic_location; /* Location within scan path; start w/ 0 */ __u8 bit_location; /* Location within bit stream; start w/ 0 */ __u8 ireg_length; /* Instruction register length */ __u16 subaddr; /* Amount of sub address space */ struct voyager_asic *next; /* Next asic in linked list */ } voyager_asic_t; typedef struct voyager_module { __u8 module_addr; /* Module address */ __u8 scan_path_connected; /* Scan path connected */ __u16 ee_size; /* Size of the EEPROM */ __u16 num_asics; /* Number of Asics */ __u16 inst_bits; /* Instruction bits in the scan path */ __u16 largest_reg; /* Largest register in the scan path */ __u16 smallest_reg; /* Smallest register in the scan path */ voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ struct voyager_module *submodule; /* Submodule pointer */ struct voyager_module *next; /* Next module in linked list */ } voyager_module_t; typedef struct voyager_eeprom_hdr { __u8 module_id[4] __attribute__((packed)); __u8 version_id __attribute__((packed)); __u8 config_id __attribute__((packed)); __u16 boundry_id __attribute__((packed)); /* boundary scan id */ __u16 ee_size __attribute__((packed)); /* size of EEPROM */ __u8 assembly[11] __attribute__((packed)); /* assembly # */ __u8 assembly_rev __attribute__((packed)); /* assembly rev */ __u8 tracer[4] __attribute__((packed)); /* tracer number */ __u16 assembly_cksum __attribute__((packed)); /* asm checksum */ __u16 power_consump __attribute__((packed)); /* pwr requirements */ __u16 num_asics __attribute__((packed)); /* number of asics */ __u16 bist_time __attribute__((packed)); /* min. bist time */ __u16 err_log_offset __attribute__((packed)); /* error log offset */ __u16 scan_path_offset __attribute__((packed));/* scan path offset */ __u16 cct_offset __attribute__((packed)); __u16 log_length __attribute__((packed)); /* length of err log */ __u16 xsum_end __attribute__((packed)); /* offset to end of checksum */ __u8 reserved[4] __attribute__((packed)); __u8 sflag __attribute__((packed)); /* starting sentinal */ __u8 part_number[13] __attribute__((packed)); /* prom part number */ __u8 version[10] __attribute__((packed)); /* version number */ __u8 signature[8] __attribute__((packed)); __u16 eeprom_chksum __attribute__((packed)); __u32 data_stamp_offset __attribute__((packed)); __u8 eflag __attribute__((packed)); /* ending sentinal */ } voyager_eprom_hdr_t; #define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) #define VOYAGER_XSUM_END_OFFSET 0x2a /* the following three definitions are for internal table layouts * in the module EPROMs. 
We really only care about the IDs and * offsets */ typedef struct voyager_sp_table { __u8 asic_id __attribute__((packed)); __u8 bypass_flag __attribute__((packed)); __u16 asic_data_offset __attribute__((packed)); __u16 config_data_offset __attribute__((packed)); } voyager_sp_table_t; typedef struct voyager_jtag_table { __u8 icode[4] __attribute__((packed)); __u8 runbist[4] __attribute__((packed)); __u8 intest[4] __attribute__((packed)); __u8 samp_preld[4] __attribute__((packed)); __u8 ireg_len __attribute__((packed)); } voyager_jtt_t; typedef struct voyager_asic_data_table { __u8 jtag_id[4] __attribute__((packed)); __u16 length_bsr __attribute__((packed)); __u16 length_bist_reg __attribute__((packed)); __u32 bist_clk __attribute__((packed)); __u16 subaddr_bits __attribute__((packed)); __u16 seed_bits __attribute__((packed)); __u16 sig_bits __attribute__((packed)); __u16 jtag_offset __attribute__((packed)); } voyager_at_t; /* Voyager Interrupt Controller (VIC) registers */ /* Base to add to Cross Processor Interrupts (CPIs) when triggering * the CPU IRQ line */ /* register defines for the WCBICs (one per processor) */ #define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */ #define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */ #define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */ #define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */ #define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */ #define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */ #define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */ #define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */ /* top of memory registers */ #define VOYAGER_WCBIC_TOM_L 0x4 #define VOYAGER_WCBIC_TOM_H 0x5 /* register defines for Voyager Memory Contol (VMC) * these are present on L4 machines only */ #define VOYAGER_VMC1 0x81 #define VOYAGER_VMC2 0x91 #define VOYAGER_VMC3 0xa1 #define VOYAGER_VMC4 0xb1 /* VMC Ports */ #define VOYAGER_VMC_MEMORY_SETUP 0x9 # define VMC_Interleaving 0x01 # define VMC_4Way 0x02 # define VMC_EvenCacheLines 0x04 # define VMC_HighLine 0x08 # define VMC_Start0_Enable 0x20 # define VMC_Start1_Enable 0x40 # define VMC_Vremap 0x80 #define VOYAGER_VMC_BANK_DENSITY 0xa # define VMC_BANK_EMPTY 0 # define VMC_BANK_4MB 1 # define VMC_BANK_16MB 2 # define VMC_BANK_64MB 3 # define VMC_BANK0_MASK 0x03 # define VMC_BANK1_MASK 0x0C # define VMC_BANK2_MASK 0x30 # define VMC_BANK3_MASK 0xC0 /* Magellan Memory Controller (MMC) defines - present on L5 */ #define VOYAGER_MMC_ASIC_ID 1 /* the two memory modules corresponding to memory cards in the system */ #define VOYAGER_MMC_MEMORY0_MODULE 0x14 #define VOYAGER_MMC_MEMORY1_MODULE 0x15 /* the Magellan Memory Address (MMA) defines */ #define VOYAGER_MMA_ASIC_ID 2 /* Submodule number for the Quad Baseboard */ #define VOYAGER_QUAD_BASEBOARD 1 /* ASIC defines for the Quad Baseboard */ #define VOYAGER_QUAD_QDATA0 1 #define VOYAGER_QUAD_QDATA1 2 #define VOYAGER_QUAD_QABC 3 /* Useful areas in extended CMOS */ #define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a #define VOYAGER_MEMORY_CLICKMAP 0xa23 #define VOYAGER_DUMP_LOCATION 0xb1a /* SUS In Control bit - used to tell SUS that we don't need to be * babysat anymore */ #define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff # define VOYAGER_IN_CONTROL_FLAG 0x80 /* Voyager PSI defines */ #define VOYAGER_PSI_STATUS_REG 0x08 # define PSI_DC_FAIL 0x01 # define PSI_MON 0x02 # define PSI_FAULT 0x04 # define PSI_ALARM 0x08 # define PSI_CURRENT 0x10 # define PSI_DVM 0x20 # define PSI_PSCFAULT 0x40 # define PSI_STAT_CHG 0x80 #define 
VOYAGER_PSI_SUPPLY_REG 0x8000 /* read */ # define PSI_FAIL_DC 0x01 # define PSI_FAIL_AC 0x02 # define PSI_MON_INT 0x04 # define PSI_SWITCH_OFF 0x08 # define PSI_HX_OFF 0x10 # define PSI_SECURITY 0x20 # define PSI_CMOS_BATT_LOW 0x40 # define PSI_CMOS_BATT_FAIL 0x80 /* write */ # define PSI_CLR_SWITCH_OFF 0x13 # define PSI_CLR_HX_OFF 0x14 # define PSI_CLR_CMOS_BATT_FAIL 0x17 #define VOYAGER_PSI_MASK 0x8001 # define PSI_MASK_MASK 0x10 #define VOYAGER_PSI_AC_FAIL_REG 0x8004 #define AC_FAIL_STAT_CHANGE 0x80 #define VOYAGER_PSI_GENERAL_REG 0x8007 /* read */ # define PSI_SWITCH_ON 0x01 # define PSI_SWITCH_ENABLED 0x02 # define PSI_ALARM_ENABLED 0x08 # define PSI_SECURE_ENABLED 0x10 # define PSI_COLD_RESET 0x20 # define PSI_COLD_START 0x80 /* write */ # define PSI_POWER_DOWN 0x10 # define PSI_SWITCH_DISABLE 0x01 # define PSI_SWITCH_ENABLE 0x11 # define PSI_CLEAR 0x12 # define PSI_ALARM_DISABLE 0x03 # define PSI_ALARM_ENABLE 0x13 # define PSI_CLEAR_COLD_RESET 0x05 # define PSI_SET_COLD_RESET 0x15 # define PSI_CLEAR_COLD_START 0x07 # define PSI_SET_COLD_START 0x17 struct voyager_bios_info { __u8 len; __u8 major; __u8 minor; __u8 debug; __u8 num_classes; __u8 class_1; __u8 class_2; }; /* The following structures and definitions are for the Kernel/SUS * interface these are needed to find out how SUS initialised any Quad * boards in the system */ #define NUMBER_OF_MC_BUSSES 2 #define SLOTS_PER_MC_BUS 8 #define MAX_CPUS 16 /* 16 way CPU system */ #define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */ #define MAX_CACHE_LEVELS 4 /* # of cache levels supported */ #define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */ #define NUMBER_OF_POS_REGS 8 typedef struct { __u8 MC_Slot __attribute__((packed)); __u8 POS_Values[NUMBER_OF_POS_REGS] __attribute__((packed)); } MC_SlotInformation_t; struct QuadDescription { __u8 Type __attribute__((packed)); /* for type 0 (DYADIC or MONADIC) all fields * will be zero except for slot */ __u8 StructureVersion __attribute__((packed)); __u32 CPI_BaseAddress __attribute__((packed)); __u32 LARC_BankSize __attribute__((packed)); __u32 LocalMemoryStateBits __attribute__((packed)); __u8 Slot __attribute__((packed)); /* Processor slots 1 - 4 */ }; struct ProcBoardInfo { __u8 Type __attribute__((packed)); __u8 StructureVersion __attribute__((packed)); __u8 NumberOfBoards __attribute__((packed)); struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS] __attribute__((packed)); }; struct CacheDescription { __u8 Level __attribute__((packed)); __u32 TotalSize __attribute__((packed)); __u16 LineSize __attribute__((packed)); __u8 Associativity __attribute__((packed)); __u8 CacheType __attribute__((packed)); __u8 WriteType __attribute__((packed)); __u8 Number_CPUs_SharedBy __attribute__((packed)); __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS] __attribute__((packed)); }; struct CPU_Description { __u8 CPU_HardwareId __attribute__((packed)); char *FRU_String __attribute__((packed)); __u8 NumberOfCacheLevels __attribute__((packed)); struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS] __attribute__((packed)); }; struct CPU_Info { __u8 Type __attribute__((packed)); __u8 StructureVersion __attribute__((packed)); __u8 NumberOf_CPUs __attribute__((packed)); struct CPU_Description CPU_Data[MAX_CPUS] __attribute__((packed)); }; /* * This structure will be used by SUS and the OS. * The assumption about this structure is that no blank space is * packed in it by our friend the compiler. 
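 *
 * (Illustrative note, not from the original source: if one wanted to
 * verify that assumption at build time for a given field, a check along
 * the lines of
 *
 *	BUILD_BUG_ON(offsetof(Voyager_KernelSUS_Mbox_t, OS_Flags) != 4);
 *
 * could be added, though the original code simply relies on the layout.)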
*/ typedef struct { __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ __u32 OS_Flags; /* Flags set by the OS as info for SUS */ __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ __u32 WatchDogCount; /* Updated by the OS on every tic. */ __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ /* All new SECOND_PASS_INTERFACE fields added from this point */ struct ProcBoardInfo *BoardData; struct CPU_Info *CPU_Data; /* All new fields must be added from this point */ } Voyager_KernelSUS_Mbox_t; /* structure for finding the right memory address to send a QIC CPI to */ struct voyager_qic_cpi { /* Each cache line (32 bytes) can trigger a cpi. The cpi * read/write may occur anywhere in the cache line---pick the * middle to be safe */ struct { __u32 pad1[3]; __u32 cpi; __u32 pad2[4]; } qic_cpi[8]; }; struct voyager_status { __u32 power_fail:1; __u32 switch_off:1; __u32 request_from_kernel:1; }; struct voyager_psi_regs { __u8 cat_id; __u8 cat_dev; __u8 cat_control; __u8 subaddr; __u8 dummy4; __u8 checkbit; __u8 subaddr_low; __u8 subaddr_high; __u8 intstatus; __u8 stat1; __u8 stat3; __u8 fault; __u8 tms; __u8 gen; __u8 sysconf; __u8 dummy15; }; struct voyager_psi_subregs { __u8 supply; __u8 mask; __u8 present; __u8 DCfail; __u8 ACfail; __u8 fail; __u8 UPSfail; __u8 genstatus; }; struct voyager_psi { struct voyager_psi_regs regs; struct voyager_psi_subregs subregs; }; struct voyager_SUS { #define VOYAGER_DUMP_BUTTON_NMI 0x1 #define VOYAGER_SUS_VALID 0x2 #define VOYAGER_SYSINT_COMPLETE 0x3 __u8 SUS_mbox; #define VOYAGER_NO_COMMAND 0x0 #define VOYAGER_IGNORE_DUMP 0x1 #define VOYAGER_DO_DUMP 0x2 #define VOYAGER_SYSINT_HANDSHAKE 0x3 #define VOYAGER_DO_MEM_DUMP 0x4 #define VOYAGER_SYSINT_WAS_RECOVERED 0x5 __u8 kernel_mbox; #define VOYAGER_MAILBOX_VERSION 0x10 __u8 SUS_version; __u8 kernel_version; #define VOYAGER_OS_HAS_SYSINT 0x1 #define VOYAGER_OS_IN_PROGRESS 0x2 #define VOYAGER_UPDATING_WDPERIOD 0x4 __u32 kernel_flags; #define VOYAGER_SUS_BOOTING 0x1 #define VOYAGER_SUS_IN_PROGRESS 0x2 __u32 SUS_flags; __u32 watchdog_period; __u32 watchdog_count; __u32 SUS_errorlog; /* lots of system configuration stuff under here */ }; /* Variables exported by voyager_smp */ extern __u32 voyager_extended_vic_processors; extern __u32 voyager_allowed_boot_processors; extern __u32 voyager_quad_processors; extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS]; extern struct voyager_SUS *voyager_SUS; /* variables exported always */ extern int voyager_level; extern int kvoyagerd_running; extern struct semaphore kvoyagerd_sem; extern struct voyager_status voyager_status; /* functions exported by the voyager and voyager_smp modules */ extern int voyager_cat_readb(__u8 module, __u8 asic, int reg); extern void voyager_cat_init(void); extern void voyager_detect(struct voyager_bios_info *); extern void voyager_trap_init(void); extern void voyager_setup_irqs(void); extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length); extern void voyager_smp_intr_init(void); 
extern __u8 voyager_extended_cmos_read(__u16 cmos_address); extern void voyager_smp_dump(void); extern void voyager_timer_interrupt(struct pt_regs *regs); extern void smp_local_timer_interrupt(struct pt_regs * regs); extern void voyager_power_off(void); extern void smp_voyager_power_off(void *dummy); extern void voyager_restart(void); extern void voyager_cat_power_off(void); extern void voyager_cat_do_common_interrupt(void); extern void voyager_handle_nmi(void); /* Commands for the following are */ #define VOYAGER_PSI_READ 0 #define VOYAGER_PSI_WRITE 1 #define VOYAGER_PSI_SUBREAD 2 #define VOYAGER_PSI_SUBWRITE 3 extern void voyager_cat_psi(__u8, __u16, __u8 *); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-i386/xor.h000066400000000000000000000520551314037446600236500ustar00rootroot00000000000000/* * include/asm-i386/xor.h * * Optimized RAID-5 checksumming functions for MMX and SSE. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * High-speed RAID5 checksumming functions utilizing MMX instructions. * Copyright (C) 1998 Ingo Molnar. */ #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" #define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" #define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" #define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" #define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" #define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" #include static void xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ ST(i,0) \ XO1(i+1,1) \ ST(i+1,1) \ XO1(i+2,2) \ ST(i+2,2) \ XO1(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ ST(i,0) \ XO2(i+1,1) \ ST(i+1,1) \ XO2(i+2,2) \ ST(i+2,2) \ XO2(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl $128, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ ST(i,0) \ XO3(i+1,1) \ ST(i+1,1) \ XO3(i+2,2) \ ST(i+2,2) \ XO3(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl 
$128, %3 ;\n" " addl $128, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory"); kernel_fpu_end(); } static void xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 7; kernel_fpu_begin(); /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. */ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ ST(i,0) \ XO4(i+1,1) \ ST(i+1,1) \ XO4(i+2,2) \ ST(i+2,2) \ XO4(i+3,3) \ ST(i+3,3) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $128, %1 ;\n" " addl $128, %2 ;\n" " addl $128, %3 ;\n" " addl $128, %4 ;\n" " addl $128, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } #undef LD #undef XO1 #undef XO2 #undef XO3 #undef XO4 #undef ST #undef BLOCK static void xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " movq %%mm0, (%1) ;\n" " pxor 8(%2), %%mm1 ;\n" " movq 24(%1), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " pxor 16(%2), %%mm2 ;\n" " movq 32(%1), %%mm4 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 24(%2), %%mm3 ;\n" " movq 40(%1), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%2), %%mm4 ;\n" " movq 48(%1), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 40(%2), %%mm5 ;\n" " movq 56(%1), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%2), %%mm6 ;\n" " pxor 56(%2), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); kernel_fpu_end(); } static void xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor 8(%2), %%mm1 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 16(%2), %%mm2 ;\n" " movq %%mm0, (%1) ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor 16(%3), %%mm2 ;\n" " movq 24(%1), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " movq 32(%1), %%mm4 ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 24(%2), %%mm3 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 32(%2), %%mm4 ;\n" " pxor 24(%3), %%mm3 ;\n" " pxor 40(%2), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 40(%3), %%mm5 ;\n" " movq 48(%1), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " movq 56(%1), %%mm7 ;\n" " pxor 48(%2), %%mm6 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 56(%2), %%mm7 ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " 
movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : : "memory" ); kernel_fpu_end(); } static void xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor 8(%2), %%mm1 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 16(%2), %%mm2 ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor (%4), %%mm0 ;\n" " movq 24(%1), %%mm3 ;\n" " pxor 16(%3), %%mm2 ;\n" " pxor 8(%4), %%mm1 ;\n" " movq %%mm0, (%1) ;\n" " movq 32(%1), %%mm4 ;\n" " pxor 24(%2), %%mm3 ;\n" " pxor 16(%4), %%mm2 ;\n" " movq %%mm1, 8(%1) ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 32(%2), %%mm4 ;\n" " pxor 24(%3), %%mm3 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 40(%2), %%mm5 ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 24(%4), %%mm3 ;\n" " movq %%mm3, 24(%1) ;\n" " movq 56(%1), %%mm7 ;\n" " movq 48(%1), %%mm6 ;\n" " pxor 40(%3), %%mm5 ;\n" " pxor 32(%4), %%mm4 ;\n" " pxor 48(%2), %%mm6 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 56(%2), %%mm7 ;\n" " pxor 40(%4), %%mm5 ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%4), %%mm6 ;\n" " pxor 56(%4), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " addl $64, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory"); kernel_fpu_end(); } static void xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 6; kernel_fpu_begin(); /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( " .align 32,0x90 ;\n" " 1: ;\n" " movq (%1), %%mm0 ;\n" " movq 8(%1), %%mm1 ;\n" " pxor (%2), %%mm0 ;\n" " pxor 8(%2), %%mm1 ;\n" " movq 16(%1), %%mm2 ;\n" " pxor (%3), %%mm0 ;\n" " pxor 8(%3), %%mm1 ;\n" " pxor 16(%2), %%mm2 ;\n" " pxor (%4), %%mm0 ;\n" " pxor 8(%4), %%mm1 ;\n" " pxor 16(%3), %%mm2 ;\n" " movq 24(%1), %%mm3 ;\n" " pxor (%5), %%mm0 ;\n" " pxor 8(%5), %%mm1 ;\n" " movq %%mm0, (%1) ;\n" " pxor 16(%4), %%mm2 ;\n" " pxor 24(%2), %%mm3 ;\n" " movq %%mm1, 8(%1) ;\n" " pxor 16(%5), %%mm2 ;\n" " pxor 24(%3), %%mm3 ;\n" " movq 32(%1), %%mm4 ;\n" " movq %%mm2, 16(%1) ;\n" " pxor 24(%4), %%mm3 ;\n" " pxor 32(%2), %%mm4 ;\n" " movq 40(%1), %%mm5 ;\n" " pxor 24(%5), %%mm3 ;\n" " pxor 32(%3), %%mm4 ;\n" " pxor 40(%2), %%mm5 ;\n" " movq %%mm3, 24(%1) ;\n" " pxor 32(%4), %%mm4 ;\n" " pxor 40(%3), %%mm5 ;\n" " movq 48(%1), %%mm6 ;\n" " movq 56(%1), %%mm7 ;\n" " pxor 32(%5), %%mm4 ;\n" " pxor 40(%4), %%mm5 ;\n" " pxor 48(%2), %%mm6 ;\n" " pxor 56(%2), %%mm7 ;\n" " movq %%mm4, 32(%1) ;\n" " pxor 48(%3), %%mm6 ;\n" " pxor 56(%3), %%mm7 ;\n" " pxor 40(%5), %%mm5 ;\n" " pxor 48(%4), %%mm6 ;\n" " pxor 56(%4), %%mm7 ;\n" " movq %%mm5, 40(%1) ;\n" " pxor 48(%5), %%mm6 ;\n" " pxor 56(%5), %%mm7 ;\n" " movq %%mm6, 48(%1) ;\n" " movq %%mm7, 56(%1) ;\n" " addl $64, %1 ;\n" " addl $64, %2 ;\n" " addl $64, %3 ;\n" " addl $64, %4 ;\n" " addl $64, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); kernel_fpu_end(); } static struct xor_block_template xor_block_pII_mmx = { .name = "pII_mmx", .do_2 = xor_pII_mmx_2, .do_3 = xor_pII_mmx_3, .do_4 = xor_pII_mmx_4, .do_5 = xor_pII_mmx_5, }; static struct xor_block_template xor_block_p5_mmx = { .name = "p5_mmx", .do_2 = xor_p5_mmx_2, .do_3 = xor_p5_mmx_3, .do_4 = xor_p5_mmx_4, .do_5 = xor_p5_mmx_5, }; /* * Cache avoiding checksumming functions utilizing KNI instructions * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ #define XMMS_SAVE do { \ preempt_disable(); \ cr0 = read_cr0(); \ clts(); \ __asm__ __volatile__ ( \ "movups %%xmm0,(%0) ;\n\t" \ "movups %%xmm1,0x10(%0) ;\n\t" \ "movups %%xmm2,0x20(%0) ;\n\t" \ "movups %%xmm3,0x30(%0) ;\n\t" \ : \ : "r" (xmm_save) \ : "memory"); \ } while(0) #define XMMS_RESTORE do { \ __asm__ __volatile__ ( \ "sfence ;\n\t" \ "movups (%0),%%xmm0 ;\n\t" \ "movups 0x10(%0),%%xmm1 ;\n\t" \ "movups 0x20(%0),%%xmm2 ;\n\t" \ "movups 0x30(%0),%%xmm3 ;\n\t" \ : \ : "r" (xmm_save) \ : "memory"); \ write_cr0(cr0); \ preempt_enable(); \ } while(0) #define ALIGN16 __attribute__((aligned(16))) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" #define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" #define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" #define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" #define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" #define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" #define XO5(x,y) " 
xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ PF1(i) \ PF1(i+2) \ LD(i+2,2) \ LD(i+3,3) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2) : : "memory"); XMMS_RESTORE; } static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r"(p2), "+r"(p3) : : "memory" ); XMMS_RESTORE; } static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ PF0(i+4) \ PF0(i+6) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " addl $256, %4 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) : : "memory" ); XMMS_RESTORE; } static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 8; char xmm_save[16*4] ALIGN16; int cr0; XMMS_SAVE; /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a register that is shared with any other variable. That's because we modify p4 and p5 there, but we can't mark them as read/write, otherwise we'd overflow the 10-asm-operands limit of GCC < 3.1. 
*/ __asm__ ("" : "+r" (p4), "+r" (p5)); __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ PF4(i) \ PF4(i+2) \ PF0(i+4) \ PF0(i+6) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ XO4(i+1,1) \ XO4(i+2,2) \ XO4(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addl $256, %1 ;\n" " addl $256, %2 ;\n" " addl $256, %3 ;\n" " addl $256, %4 ;\n" " addl $256, %5 ;\n" " decl %0 ;\n" " jnz 1b ;\n" : "+r" (lines), "+r" (p1), "+r" (p2), "+r" (p3) : "r" (p4), "r" (p5) : "memory"); /* p4 and p5 were modified, and now the variables are dead. Clobber them just to be sure nobody does something stupid like assuming they have some legal value. */ __asm__ ("" : "=r" (p4), "=r" (p5)); XMMS_RESTORE; } static struct xor_block_template xor_block_pIII_sse = { .name = "pIII_sse", .do_2 = xor_sse_2, .do_3 = xor_sse_3, .do_4 = xor_sse_4, .do_5 = xor_sse_5, }; /* Also try the generic routines. */ #include #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_8regs_p); \ xor_speed(&xor_block_32regs); \ xor_speed(&xor_block_32regs_p); \ if (cpu_has_xmm) \ xor_speed(&xor_block_pIII_sse); \ if (cpu_has_mmx) { \ xor_speed(&xor_block_pII_mmx); \ xor_speed(&xor_block_p5_mmx); \ } \ } while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ #define XOR_SELECT_TEMPLATE(FASTEST) \ (cpu_has_xmm ? 
&xor_block_pIII_sse : FASTEST) UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/000077500000000000000000000000001314037446600231255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/8253pit.h000066400000000000000000000002011314037446600244050ustar00rootroot00000000000000/* * 8253/8254 Programmable Interval Timer */ #ifndef _8253PIT_H #define _8253PIT_H #define PIT_TICK_RATE 1193182UL #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/a.out.h000066400000000000000000000014551314037446600243310ustar00rootroot00000000000000#ifndef __X8664_A_OUT_H__ #define __X8664_A_OUT_H__ /* 32bit a.out */ struct exec { unsigned int a_info; /* Use macros N_MAGIC, etc for access */ unsigned a_text; /* length of text, in bytes */ unsigned a_data; /* length of data, in bytes */ unsigned a_bss; /* length of uninitialized data area for file, in bytes */ unsigned a_syms; /* length of symbol table data in file, in bytes */ unsigned a_entry; /* start address */ unsigned a_trsize; /* length of relocation info for text, in bytes */ unsigned a_drsize; /* length of relocation info for data, in bytes */ }; #define N_TRSIZE(a) ((a).a_trsize) #define N_DRSIZE(a) ((a).a_drsize) #define N_SYMSIZE(a) ((a).a_syms) #ifdef __KERNEL__ #include #define STACK_TOP TASK_SIZE #endif #endif /* __A_OUT_GNU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/acpi.h000066400000000000000000000111011314037446600242040ustar00rootroot00000000000000/* * asm-x86_64/acpi.h * * Copyright (C) 2001 Paul Diefenbaugh * Copyright (C) 2001 Patrick Mochel * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H #ifdef __KERNEL__ #include #define COMPILER_DEPENDENT_INT64 long long #define COMPILER_DEPENDENT_UINT64 unsigned long long /* * Calling conventions: * * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) * ACPI_EXTERNAL_XFACE - External ACPI interfaces * ACPI_INTERNAL_XFACE - Internal ACPI interfaces * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces */ #define ACPI_SYSTEM_XFACE #define ACPI_EXTERNAL_XFACE #define ACPI_INTERNAL_XFACE #define ACPI_INTERNAL_VAR_XFACE /* Asm macros */ #define ACPI_ASM_MACROS #define BREAKPOINT3 #define ACPI_DISABLE_IRQS() local_irq_disable() #define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_FLUSH_CPU_CACHE() wbinvd() static inline int __acpi_acquire_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return (new < 3) ? 
-1 : 0; } static inline int __acpi_release_global_lock (unsigned int *lock) { unsigned int old, new, val; do { old = *lock; new = old & ~0x3; val = cmpxchg(lock, old, new); } while (unlikely (val != old)); return old & 0x1; } #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) /* * Math helper asm macros */ #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ asm("divl %2;" \ :"=a"(q32), "=d"(r32) \ :"r"(d32), \ "0"(n_lo), "1"(n_hi)) #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ asm("shrl $1,%2;" \ "rcrl $1,%3;" \ :"=r"(n_hi), "=r"(n_lo) \ :"0"(n_hi), "1"(n_lo)) #ifdef CONFIG_ACPI extern int acpi_lapic; extern int acpi_ioapic; extern int acpi_noirq; extern int acpi_strict; extern int acpi_disabled; extern int acpi_pci_disabled; extern int acpi_ht; static inline void disable_acpi(void) { acpi_disabled = 1; acpi_ht = 0; acpi_pci_disabled = 1; acpi_noirq = 1; } /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ #define FIX_ACPI_PAGES 4 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); static inline void acpi_noirq_set(void) { acpi_noirq = 1; } static inline void acpi_disable_pci(void) { acpi_pci_disabled = 1; acpi_noirq_set(); } extern int acpi_irq_balance_set(char *str); #else /* !CONFIG_ACPI */ #define acpi_lapic 0 #define acpi_ioapic 0 static inline void acpi_noirq_set(void) { } static inline void acpi_disable_pci(void) { } #endif /* !CONFIG_ACPI */ extern int acpi_numa; extern int acpi_scan_nodes(unsigned long start, unsigned long end); #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) #ifdef CONFIG_ACPI_SLEEP /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); extern void acpi_restore_state_mem(void); extern unsigned long acpi_wakeup_address; /* early initialization routine */ extern void acpi_reserve_bootmem(void); #ifdef CONFIG_ACPI_PV_SLEEP extern int acpi_notify_hypervisor_state(u8 sleep_state, u32 pm1a_cnt, u32 pm1b_cnt); #endif /* CONFIG_ACPI_PV_SLEEP */ #endif /*CONFIG_ACPI_SLEEP*/ #define boot_cpu_physical_apicid boot_cpu_id extern int acpi_disabled; extern int acpi_pci_disabled; extern u8 x86_acpiid_to_apicid[]; #ifndef CONFIG_XEN #define ARCH_HAS_POWER_INIT 1 #endif extern int acpi_skip_timer_override; #endif /*__KERNEL__*/ #endif /*_ASM_ACPI_H*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/agp.h000066400000000000000000000020551314037446600240470ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include /* * Functions to keep the agpgart mappings coherent. * The GART gives the CPU a physical alias of memory. The alias is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. */ int map_page_into_agp(struct page *page); int unmap_page_from_agp(struct page *page); #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) (x) #define gart_to_phys(x) (x) /* GATT allocation. Returns/accepts GATT kernel virtual address. 
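 *
 * Editorial usage sketch, not part of the original header; "order" is a
 * hypothetical page order assumed to have been derived from the GATT size:
 *
 *	char *gatt = alloc_gatt_pages(order);
 *	if (gatt == NULL)
 *		return -ENOMEM;
 *	...
 *	free_gatt_pages(gatt, order);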
*/ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) #define free_gatt_pages(table, order) \ free_pages((unsigned long)(table), (order)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ansidecl.h000066400000000000000000000307731314037446600250720ustar00rootroot00000000000000/* ANSI and traditional C compatability macros Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of the GNU C Library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as * required. * Keith Owens 15 May 2006 */ /* ANSI and traditional C compatibility macros ANSI C is assumed if __STDC__ is #defined. Macro ANSI C definition Traditional C definition ----- ---- - ---------- ----------- - ---------- ANSI_PROTOTYPES 1 not defined PTR `void *' `char *' PTRCONST `void *const' `char *' LONG_DOUBLE `long double' `double' const not defined `' volatile not defined `' signed not defined `' VA_START(ap, var) va_start(ap, var) va_start(ap) Note that it is safe to write "void foo();" indicating a function with no return value, in all K+R compilers we have been able to test. For declaring functions with prototypes, we also provide these: PARAMS ((prototype)) -- for functions which take a fixed number of arguments. Use this when declaring the function. When defining the function, write a K+R style argument list. For example: char *strcpy PARAMS ((char *dest, char *source)); ... char * strcpy (dest, source) char *dest; char *source; { ... } VPARAMS ((prototype, ...)) -- for functions which take a variable number of arguments. Use PARAMS to declare the function, VPARAMS to define it. For example: int printf PARAMS ((const char *format, ...)); ... int printf VPARAMS ((const char *format, ...)) { ... } For writing functions which take variable numbers of arguments, we also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These hide the differences between K+R and C89 more thoroughly than the simple VA_START() macro mentioned above. VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end. Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls corresponding to the list of fixed arguments. Then use va_arg normally to get the variable arguments, or pass your va_list object around. You do not declare the va_list yourself; VA_OPEN does it for you. Here is a complete example: int printf VPARAMS ((const char *format, ...)) { int result; VA_OPEN (ap, format); VA_FIXEDARG (ap, const char *, format); result = vfprintf (stdout, format, ap); VA_CLOSE (ap); return result; } You can declare variables either before or after the VA_OPEN, VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning and end of a block. 
They must appear at the same nesting level, and any variables declared after VA_OPEN go out of scope at VA_CLOSE. Unfortunately, with a K+R compiler, that includes the argument list. You can have multiple instances of VA_OPEN/VA_CLOSE pairs in a single function in case you need to traverse the argument list more than once. For ease of writing code which uses GCC extensions but needs to be portable to other compilers, we provide the GCC_VERSION macro that simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various wrappers around __attribute__. Also, __extension__ will be #defined to nothing if it doesn't work. See below. This header also defines a lot of obsolete macros: CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, AND, DOTS, NOARGS. Don't use them. */ #ifndef _ANSIDECL_H #define _ANSIDECL_H 1 /* Every source file includes this file, so they will all get the switch for lint. */ /* LINTLIBRARY */ /* Using MACRO(x,y) in cpp #if conditionals does not work with some older preprocessors. Thus we can't define something like this: #define HAVE_GCC_VERSION(MAJOR, MINOR) \ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) and then test "#if HAVE_GCC_VERSION(2,7)". So instead we use the macro below and test it against specific values. */ /* This macro simplifies testing whether we are using gcc, and if it is of a particular minimum version. (Both major & minor numbers are significant.) This macro will evaluate to 0 if we are not using gcc at all. */ #ifndef GCC_VERSION #define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) #endif /* GCC_VERSION */ #if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) /* All known AIX compilers implement these things (but don't always define __STDC__). The RISC/OS MIPS compiler defines these things in SVR4 mode, but does not define __STDC__. */ /* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other C++ compilers, does not define __STDC__, though it acts as if this was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ #define ANSI_PROTOTYPES 1 #define PTR void * #define PTRCONST void *const #define LONG_DOUBLE long double /* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in a #ifndef. */ #ifndef PARAMS #define PARAMS(ARGS) ARGS #endif #define VPARAMS(ARGS) ARGS #define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) /* variadic function helper macros */ /* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's use without inhibiting further decls and without declaring an actual variable. */ #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, T, N) struct Qdmy #undef const #undef volatile #undef signed #ifdef __KERNEL__ #ifndef __STDC_VERSION__ #define __STDC_VERSION__ 0 #endif #endif /* __KERNEL__ */ /* inline requires special treatment; it's in C99, and GCC >=2.7 supports it too, but it's not in C89. */ #undef inline #if __STDC_VERSION__ > 199901L /* it's a keyword */ #else # if GCC_VERSION >= 2007 # define inline __inline__ /* __inline__ prevents -pedantic warnings */ # else # define inline /* nothing */ # endif #endif /* These are obsolete. Do not use. 
*/ #ifndef IN_GCC #define CONST const #define VOLATILE volatile #define SIGNED signed #define PROTO(type, name, arglist) type name arglist #define EXFUN(name, proto) name proto #define DEFUN(name, arglist, args) name(args) #define DEFUN_VOID(name) name(void) #define AND , #define DOTS , ... #define NOARGS void #endif /* ! IN_GCC */ #else /* Not ANSI C. */ #undef ANSI_PROTOTYPES #define PTR char * #define PTRCONST PTR #define LONG_DOUBLE double #define PARAMS(args) () #define VPARAMS(args) (va_alist) va_dcl #define VA_START(va_list, var) va_start(va_list) #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) /* some systems define these in header files for non-ansi mode */ #undef const #undef volatile #undef signed #undef inline #define const #define volatile #define signed #define inline #ifndef IN_GCC #define CONST #define VOLATILE #define SIGNED #define PROTO(type, name, arglist) type name () #define EXFUN(name, proto) name() #define DEFUN(name, arglist, args) name arglist args; #define DEFUN_VOID(name) name() #define AND ; #define DOTS #define NOARGS #endif /* ! IN_GCC */ #endif /* ANSI C. */ /* Define macros for some gcc attributes. This permits us to use the macros freely, and know that they will come into play for the version of gcc in which they are supported. */ #if (GCC_VERSION < 2007) # define __attribute__(x) #endif /* Attribute __malloc__ on functions was valid as of gcc 2.96. */ #ifndef ATTRIBUTE_MALLOC # if (GCC_VERSION >= 2096) # define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) # else # define ATTRIBUTE_MALLOC # endif /* GNUC >= 2.96 */ #endif /* ATTRIBUTE_MALLOC */ /* Attributes on labels were valid as of gcc 2.93. */ #ifndef ATTRIBUTE_UNUSED_LABEL # if (!defined (__cplusplus) && GCC_VERSION >= 2093) # define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED # else # define ATTRIBUTE_UNUSED_LABEL # endif /* !__cplusplus && GNUC >= 2.93 */ #endif /* ATTRIBUTE_UNUSED_LABEL */ #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif /* ATTRIBUTE_UNUSED */ /* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the identifier name. */ #if ! defined(__cplusplus) || (GCC_VERSION >= 3004) # define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED #else /* !__cplusplus || GNUC >= 3.4 */ # define ARG_UNUSED(NAME) NAME #endif /* !__cplusplus || GNUC >= 3.4 */ #ifndef ATTRIBUTE_NORETURN #define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) #endif /* ATTRIBUTE_NORETURN */ /* Attribute `nonnull' was valid as of gcc 3.3. */ #ifndef ATTRIBUTE_NONNULL # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) # else # define ATTRIBUTE_NONNULL(m) # endif /* GNUC >= 3.3 */ #endif /* ATTRIBUTE_NONNULL */ /* Attribute `pure' was valid as of gcc 3.0. */ #ifndef ATTRIBUTE_PURE # if (GCC_VERSION >= 3000) # define ATTRIBUTE_PURE __attribute__ ((__pure__)) # else # define ATTRIBUTE_PURE # endif /* GNUC >= 3.0 */ #endif /* ATTRIBUTE_PURE */ /* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. This was the case for the `printf' format attribute by itself before GCC 3.3, but as of 3.3 we need to add the `nonnull' attribute to retain this behavior. 
*/ #ifndef ATTRIBUTE_PRINTF #define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) #define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) #define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) #define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) #define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) #define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) #endif /* ATTRIBUTE_PRINTF */ /* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on a function pointer. Format attributes were allowed on function pointers as of gcc 3.1. */ #ifndef ATTRIBUTE_FPTR_PRINTF # if (GCC_VERSION >= 3001) # define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) # else # define ATTRIBUTE_FPTR_PRINTF(m, n) # endif /* GNUC >= 3.1 */ # define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) # define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) # define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) # define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) # define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) #endif /* ATTRIBUTE_FPTR_PRINTF */ /* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A NULL format specifier was allowed as of gcc 3.3. */ #ifndef ATTRIBUTE_NULL_PRINTF # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) # else # define ATTRIBUTE_NULL_PRINTF(m, n) # endif /* GNUC >= 3.3 */ # define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) # define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) # define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) # define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) # define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) #endif /* ATTRIBUTE_NULL_PRINTF */ /* Attribute `sentinel' was valid as of gcc 3.5. */ #ifndef ATTRIBUTE_SENTINEL # if (GCC_VERSION >= 3005) # define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) # else # define ATTRIBUTE_SENTINEL # endif /* GNUC >= 3.5 */ #endif /* ATTRIBUTE_SENTINEL */ #ifndef ATTRIBUTE_ALIGNED_ALIGNOF # if (GCC_VERSION >= 3000) # define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) # else # define ATTRIBUTE_ALIGNED_ALIGNOF(m) # endif /* GNUC >= 3.0 */ #endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ /* We use __extension__ in some places to suppress -pedantic warnings about GCC extensions. This feature didn't work properly before gcc 2.8. */ #if GCC_VERSION < 2008 #define __extension__ #endif #endif /* ansidecl.h */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/apic.h000066400000000000000000000061171314037446600242170ustar00rootroot00000000000000#ifndef __ASM_APIC_H #define __ASM_APIC_H #include #include #include #include #include #define Dprintk(x...) /* * Debugging macros */ #define APIC_QUIET 0 #define APIC_VERBOSE 1 #define APIC_DEBUG 2 extern int apic_verbosity; extern int apic_runs_main_timer; /* * Define the default level of output to be very little * This can be turned up by using apic=verbose for more * information and apic=debug for _lots_ of information. * apic_verbosity is defined in apic.c */ #define apic_printk(v, s, a...) do { \ if ((v) <= apic_verbosity) \ printk(s, ##a); \ } while (0) #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) struct pt_regs; /* * Basic functions accessing APICs. 
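 *
 * Editorial sketch, not part of the original file, of how these accessors
 * are typically combined (APIC_EOI, APIC_ICR and APIC_ICR_BUSY come from
 * apicdef.h; this mirrors ack_APIC_irq() and apic_wait_icr_idle() below):
 *
 *	apic_write(APIC_EOI, 0);		acknowledge the current interrupt
 *	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
 *		cpu_relax();			spin until a pending IPI is sent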
*/ static __inline void apic_write(unsigned long reg, unsigned int v) { *((volatile unsigned int *)(APIC_BASE+reg)) = v; } static __inline unsigned int apic_read(unsigned long reg) { return *((volatile unsigned int *)(APIC_BASE+reg)); } static __inline__ void apic_wait_icr_idle(void) { while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ); } static inline void ack_APIC_irq(void) { /* * ack_APIC_irq() actually gets compiled as a single instruction: * - a single rmw on Pentium/82489DX * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) * ... yummie. */ /* Docs say use 0 for future compatibility */ apic_write(APIC_EOI, 0); } extern int get_maxlvt (void); extern void clear_local_APIC (void); extern void connect_bsp_APIC (void); extern void disconnect_bsp_APIC (int virt_wire_setup); extern void disable_local_APIC (void); extern int verify_local_APIC (void); extern void cache_APIC_registers (void); extern void sync_Arb_IDs (void); extern void init_bsp_APIC (void); extern void setup_local_APIC (void); extern void init_apic_mappings (void); extern void smp_local_timer_interrupt (struct pt_regs * regs); extern void setup_boot_APIC_clock (void); extern void setup_secondary_APIC_clock (void); extern void setup_apic_nmi_watchdog (void); extern int reserve_lapic_nmi(void); extern void release_lapic_nmi(void); extern void disable_timer_nmi_watchdog(void); extern void enable_timer_nmi_watchdog(void); extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void clustered_apic_check(void); extern void nmi_watchdog_default(void); extern int setup_nmi_watchdog(char *); extern unsigned int nmi_watchdog; #define NMI_DEFAULT -1 #define NMI_NONE 0 #define NMI_IO_APIC 1 #define NMI_LOCAL_APIC 2 #define NMI_INVALID 3 extern int disable_timer_pin_1; extern void setup_threshold_lvt(unsigned long lvt_off); void smp_send_timer_broadcast_ipi(void); void switch_APIC_timer_to_ipi(void *cpumask); void switch_ipi_to_APIC_timer(void *cpumask); #define ARCH_APICTIMER_STOPS_ON_C3 1 #elif defined(CONFIG_X86_LOCAL_APIC) extern int APIC_init_uniprocessor (void); extern void clustered_apic_check(void); #endif /* CONFIG_X86_LOCAL_APIC */ extern unsigned boot_cpu_id; #endif /* __ASM_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/apicdef.h000066400000000000000000000242711314037446600246770ustar00rootroot00000000000000#ifndef __ASM_APICDEF_H #define __ASM_APICDEF_H #ifndef CONFIG_XEN /* * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) * * Alan Cox , 1995. 
* Ingo Molnar , 1999, 2000 */ #define APIC_DEFAULT_PHYS_BASE 0xfee00000 #define APIC_ID 0x20 #define APIC_ID_MASK (0xFFu<<24) #define GET_APIC_ID(x) (((x)>>24)&0xFFu) #define SET_APIC_ID(x) (((x)<<24)) #define APIC_LVR 0x30 #define APIC_LVR_MASK 0xFF00FF #define GET_APIC_VERSION(x) ((x)&0xFFu) #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) #define APIC_INTEGRATED(x) ((x)&0xF0u) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFFu #define APIC_ARBPRI 0x90 #define APIC_ARBPRI_MASK 0xFFu #define APIC_PROCPRI 0xA0 #define APIC_EOI 0xB0 #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ #define APIC_RRR 0xC0 #define APIC_LDR 0xD0 #define APIC_LDR_MASK (0xFFu<<24) #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) #define APIC_ALL_CPUS 0xFFu #define APIC_DFR 0xE0 #define APIC_DFR_CLUSTER 0x0FFFFFFFul #define APIC_DFR_FLAT 0xFFFFFFFFul #define APIC_SPIV 0xF0 #define APIC_SPIV_FOCUS_DISABLED (1<<9) #define APIC_SPIV_APIC_ENABLED (1<<8) #define APIC_ISR 0x100 #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ #define APIC_TMR 0x180 #define APIC_IRR 0x200 #define APIC_ESR 0x280 #define APIC_ESR_SEND_CS 0x00001 #define APIC_ESR_RECV_CS 0x00002 #define APIC_ESR_SEND_ACC 0x00004 #define APIC_ESR_RECV_ACC 0x00008 #define APIC_ESR_SENDILL 0x00020 #define APIC_ESR_RECVILL 0x00040 #define APIC_ESR_ILLREGA 0x00080 #define APIC_ICR 0x300 #define APIC_DEST_SELF 0x40000 #define APIC_DEST_ALLINC 0x80000 #define APIC_DEST_ALLBUT 0xC0000 #define APIC_ICR_RR_MASK 0x30000 #define APIC_ICR_RR_INVALID 0x00000 #define APIC_ICR_RR_INPROG 0x10000 #define APIC_ICR_RR_VALID 0x20000 #define APIC_INT_LEVELTRIG 0x08000 #define APIC_INT_ASSERT 0x04000 #define APIC_ICR_BUSY 0x01000 #define APIC_DEST_LOGICAL 0x00800 #define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 #define APIC_DM_NMI 0x00400 #define APIC_DM_INIT 0x00500 #define APIC_DM_STARTUP 0x00600 #define APIC_DM_EXTINT 0x00700 #define APIC_VECTOR_MASK 0x000FF #define APIC_ICR2 0x310 #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) #define SET_APIC_DEST_FIELD(x) ((x)<<24) #define APIC_LVTT 0x320 #define APIC_LVTTHMR 0x330 #define APIC_LVTPC 0x340 #define APIC_LVT0 0x350 #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) #define SET_APIC_TIMER_BASE(x) (((x)<<18)) #define APIC_TIMER_BASE_CLKIN 0x0 #define APIC_TIMER_BASE_TMBASE 0x1 #define APIC_TIMER_BASE_DIV 0x2 #define APIC_LVT_TIMER_PERIODIC (1<<17) #define APIC_LVT_MASKED (1<<16) #define APIC_LVT_LEVEL_TRIGGER (1<<15) #define APIC_LVT_REMOTE_IRR (1<<14) #define APIC_INPUT_POLARITY (1<<13) #define APIC_SEND_PENDING (1<<12) #define APIC_MODE_MASK 0x700 #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) #define APIC_MODE_FIXED 0x0 #define APIC_MODE_NMI 0x4 #define APIC_MODE_EXTINT 0x7 #define APIC_LVT1 0x360 #define APIC_LVTERR 0x370 #define APIC_TMICT 0x380 #define APIC_TMCCT 0x390 #define APIC_TDCR 0x3E0 #define APIC_TDR_DIV_TMBASE (1<<2) #define APIC_TDR_DIV_1 0xB #define APIC_TDR_DIV_2 0x0 #define APIC_TDR_DIV_4 0x1 #define APIC_TDR_DIV_8 0x2 #define APIC_TDR_DIV_16 0x3 #define APIC_TDR_DIV_32 0x8 #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #else /* CONFIG_XEN */ #define APIC_ALL_CPUS 0xFFu enum { APIC_DEST_ALLBUT = 0x1, APIC_DEST_SELF, APIC_DEST_ALLINC }; #endif /* CONFIG_XEN */ #define MAX_IO_APICS 
128 #ifndef CONFIG_XEN #define MAX_LOCAL_APIC 256 /* * All x86-64 systems are xAPIC compatible. * In the following, "apicid" is a physical APIC ID. */ #define XAPIC_DEST_CPUS_SHIFT 4 #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) #define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) /* * the local APIC register structure, memory mapped. Not terribly well * tested, but we might eventually use this one in the future - the * problem why we cannot use it right now is the P5 APIC, it has an * errata which cannot take 8-bit reads and writes, only 32-bit ones ... */ #define u32 unsigned int #define lapic ((volatile struct local_apic *)APIC_BASE) struct local_apic { /*000*/ struct { u32 __reserved[4]; } __reserved_01; /*010*/ struct { u32 __reserved[4]; } __reserved_02; /*020*/ struct { /* APIC ID Register */ u32 __reserved_1 : 24, phys_apic_id : 4, __reserved_2 : 4; u32 __reserved[3]; } id; /*030*/ const struct { /* APIC Version Register */ u32 version : 8, __reserved_1 : 8, max_lvt : 8, __reserved_2 : 8; u32 __reserved[3]; } version; /*040*/ struct { u32 __reserved[4]; } __reserved_03; /*050*/ struct { u32 __reserved[4]; } __reserved_04; /*060*/ struct { u32 __reserved[4]; } __reserved_05; /*070*/ struct { u32 __reserved[4]; } __reserved_06; /*080*/ struct { /* Task Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } tpr; /*090*/ const struct { /* Arbitration Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } apr; /*0A0*/ const struct { /* Processor Priority Register */ u32 priority : 8, __reserved_1 : 24; u32 __reserved_2[3]; } ppr; /*0B0*/ struct { /* End Of Interrupt Register */ u32 eoi; u32 __reserved[3]; } eoi; /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; /*0D0*/ struct { /* Logical Destination Register */ u32 __reserved_1 : 24, logical_dest : 8; u32 __reserved_2[3]; } ldr; /*0E0*/ struct { /* Destination Format Register */ u32 __reserved_1 : 28, model : 4; u32 __reserved_2[3]; } dfr; /*0F0*/ struct { /* Spurious Interrupt Vector Register */ u32 spurious_vector : 8, apic_enabled : 1, focus_cpu : 1, __reserved_2 : 22; u32 __reserved_3[3]; } svr; /*100*/ struct { /* In Service Register */ /*170*/ u32 bitfield; u32 __reserved[3]; } isr [8]; /*180*/ struct { /* Trigger Mode Register */ /*1F0*/ u32 bitfield; u32 __reserved[3]; } tmr [8]; /*200*/ struct { /* Interrupt Request Register */ /*270*/ u32 bitfield; u32 __reserved[3]; } irr [8]; /*280*/ union { /* Error Status Register */ struct { u32 send_cs_error : 1, receive_cs_error : 1, send_accept_error : 1, receive_accept_error : 1, __reserved_1 : 1, send_illegal_vector : 1, receive_illegal_vector : 1, illegal_register_address : 1, __reserved_2 : 24; u32 __reserved_3[3]; } error_bits; struct { u32 errors; u32 __reserved_3[3]; } all_errors; } esr; /*290*/ struct { u32 __reserved[4]; } __reserved_08; /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; /*300*/ struct { /* Interrupt Command Register 1 */ u32 
vector : 8, delivery_mode : 3, destination_mode : 1, delivery_status : 1, __reserved_1 : 1, level : 1, trigger : 1, __reserved_2 : 2, shorthand : 2, __reserved_3 : 12; u32 __reserved_4[3]; } icr1; /*310*/ struct { /* Interrupt Command Register 2 */ union { u32 __reserved_1 : 24, phys_dest : 4, __reserved_2 : 4; u32 __reserved_3 : 24, logical_dest : 8; } dest; u32 __reserved_4[3]; } icr2; /*320*/ struct { /* LVT - Timer */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, timer_mode : 1, __reserved_3 : 14; u32 __reserved_4[3]; } lvt_timer; /*330*/ struct { /* LVT - Thermal Sensor */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_thermal; /*340*/ struct { /* LVT - Performance Counter */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_pc; /*350*/ struct { /* LVT - LINT0 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint0; /*360*/ struct { /* LVT - LINT1 */ u32 vector : 8, delivery_mode : 3, __reserved_1 : 1, delivery_status : 1, polarity : 1, remote_irr : 1, trigger : 1, mask : 1, __reserved_2 : 15; u32 __reserved_3[3]; } lvt_lint1; /*370*/ struct { /* LVT - Error */ u32 vector : 8, __reserved_1 : 4, delivery_status : 1, __reserved_2 : 3, mask : 1, __reserved_3 : 15; u32 __reserved_4[3]; } lvt_error; /*380*/ struct { /* Timer Initial Count Register */ u32 initial_count; u32 __reserved_2[3]; } timer_icr; /*390*/ const struct { /* Timer Current Count Register */ u32 curr_count; u32 __reserved_2[3]; } timer_ccr; /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; /*3E0*/ struct { /* Timer Divide Configuration Register */ u32 divisor : 4, __reserved_1 : 28; u32 __reserved_2[3]; } timer_dcr; /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; } __attribute__ ((packed)); #undef u32 #endif /* CONFIG_XEN */ #define BAD_APICID 0xFFu #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/atomic.h000066400000000000000000000230131314037446600245510ustar00rootroot00000000000000#ifndef __ARCH_X86_64_ATOMIC__ #define __ARCH_X86_64_ATOMIC__ #include #include /* atomic_t should be 32 bit signed type */ /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ #ifdef CONFIG_SMP #define LOCK "lock ; " #else #define LOCK "" #endif /* * Make sure gcc doesn't try to be clever and move things around * on us. We need to use _exactly_ the address the user gave us, * not some alias that contains the same information. */ typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } /** * atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ #define atomic_read(v) ((v)->counter) /** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value * * Atomically sets the value of @v to @i. */ #define atomic_set(v,i) (((v)->counter) = (i)) /** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v. 
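 *
 * Editorial usage sketch, not part of the original header; "refcnt" is a
 * hypothetical counter used only for illustration:
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_add(2, &refcnt);			counter is now 3
 *	atomic_sub(1, &refcnt);			counter is now 2
 *	if (atomic_sub_and_test(2, &refcnt))
 *		printk(KERN_INFO "refcnt hit zero\n");	branch taken: result is 0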
*/ static __inline__ void atomic_add(int i, atomic_t *v) { __asm__ __volatile__( LOCK "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic_sub - subtract the atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v. */ static __inline__ void atomic_sub(int i, atomic_t *v) { __asm__ __volatile__( LOCK "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "subl %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_inc - increment atomic variable * @v: pointer of type atomic_t * * Atomically increments @v by 1. */ static __inline__ void atomic_inc(atomic_t *v) { __asm__ __volatile__( LOCK "incl %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic_dec - decrement atomic variable * @v: pointer of type atomic_t * * Atomically decrements @v by 1. */ static __inline__ void atomic_dec(atomic_t *v) { __asm__ __volatile__( LOCK "decl %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ static __inline__ int atomic_dec_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "decl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic_inc_and_test - increment and test * @v: pointer of type atomic_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ static __inline__ int atomic_inc_and_test(atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "incl %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic_add_negative - add and test if negative * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. */ static __inline__ int atomic_add_negative(int i, atomic_t *v) { unsigned char c; __asm__ __volatile__( LOCK "addl %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic_add_return - add and return * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v and returns @i + @v */ static __inline__ int atomic_add_return(int i, atomic_t *v) { int __i = i; __asm__ __volatile__( LOCK "xaddl %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; } static __inline__ int atomic_sub_return(int i, atomic_t *v) { return atomic_add_return(-i,v); } #define atomic_inc_return(v) (atomic_add_return(1,v)) #define atomic_dec_return(v) (atomic_sub_return(1,v)) /* An 64bit atomic type */ typedef struct { volatile long counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } /** * atomic64_read - read atomic64 variable * @v: pointer of type atomic64_t * * Atomically reads the value of @v. * Doesn't imply a read memory barrier. 
*/ #define atomic64_read(v) ((v)->counter) /** * atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: required value * * Atomically sets the value of @v to @i. */ #define atomic64_set(v,i) (((v)->counter) = (i)) /** * atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v. */ static __inline__ void atomic64_add(long i, atomic64_t *v) { __asm__ __volatile__( LOCK "addq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic64_sub - subtract the atomic64 variable * @i: integer value to subtract * @v: pointer to type atomic64_t * * Atomically subtracts @i from @v. */ static __inline__ void atomic64_sub(long i, atomic64_t *v) { __asm__ __volatile__( LOCK "subq %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /** * atomic64_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer to type atomic64_t * * Atomically subtracts @i from @v and returns * true if the result is zero, or false for all * other cases. */ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) { unsigned char c; __asm__ __volatile__( LOCK "subq %2,%0; sete %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic64_inc - increment atomic64 variable * @v: pointer to type atomic64_t * * Atomically increments @v by 1. */ static __inline__ void atomic64_inc(atomic64_t *v) { __asm__ __volatile__( LOCK "incq %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic64_dec - decrement atomic64 variable * @v: pointer to type atomic64_t * * Atomically decrements @v by 1. */ static __inline__ void atomic64_dec(atomic64_t *v) { __asm__ __volatile__( LOCK "decq %0" :"=m" (v->counter) :"m" (v->counter)); } /** * atomic64_dec_and_test - decrement and test * @v: pointer to type atomic64_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ static __inline__ int atomic64_dec_and_test(atomic64_t *v) { unsigned char c; __asm__ __volatile__( LOCK "decq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic64_inc_and_test - increment and test * @v: pointer to type atomic64_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ static __inline__ int atomic64_inc_and_test(atomic64_t *v) { unsigned char c; __asm__ __volatile__( LOCK "incq %0; sete %1" :"=m" (v->counter), "=qm" (c) :"m" (v->counter) : "memory"); return c != 0; } /** * atomic64_add_negative - add and test if negative * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v and returns true * if the result is negative, or false when * result is greater than or equal to zero. 
*/ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) { unsigned char c; __asm__ __volatile__( LOCK "addq %2,%0; sets %1" :"=m" (v->counter), "=qm" (c) :"ir" (i), "m" (v->counter) : "memory"); return c; } /** * atomic64_add_return - add and return * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v and returns @i + @v */ static __inline__ long atomic64_add_return(long i, atomic64_t *v) { long __i = i; __asm__ __volatile__( LOCK "xaddq %0, %1;" :"=r"(i) :"m"(v->counter), "0"(i)); return i + __i; } static __inline__ long atomic64_sub_return(long i, atomic64_t *v) { return atomic64_add_return(-i,v); } #define atomic64_inc_return(v) (atomic64_add_return(1,v)) #define atomic64_dec_return(v) (atomic64_sub_return(1,v)) #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) /** * atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. */ #define atomic_add_unless(v, a, u) \ ({ \ int c, old; \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ c = old; \ c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /* These are x86-specific, used by some header files */ #define atomic_clear_mask(mask, addr) \ __asm__ __volatile__(LOCK "andl %0,%1" \ : : "r" (~(mask)),"m" (*addr) : "memory") #define atomic_set_mask(mask, addr) \ __asm__ __volatile__(LOCK "orl %0,%1" \ : : "r" ((unsigned)mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/auxvec.h000066400000000000000000000001041314037446600245640ustar00rootroot00000000000000#ifndef __ASM_X86_64_AUXVEC_H #define __ASM_X86_64_AUXVEC_H #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/bfd.h000066400000000000000000005025161314037446600240420ustar00rootroot00000000000000/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c", "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c", "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c", "linker.c" and "simple.c". Run "make headers" in your build bfd/ to regenerate. */ /* Main header file for the bfd library -- portable access to object files. Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. Contributed by Cygnus Support. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ /* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use. * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as * required. * Keith Owens 15 May 2006 */ #ifndef __BFD_H_SEEN__ #define __BFD_H_SEEN__ #ifdef __cplusplus extern "C" { #endif #ifdef __KERNEL__ #include #else /* __KERNEL__ */ #include "ansidecl.h" #include "symcat.h" #endif /* __KERNEL__ */ #if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) #ifndef SABER /* This hack is to avoid a problem with some strict ANSI C preprocessors. The problem is, "32_" is not a valid preprocessing token, and we don't want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will cause the inner CONCAT2 macros to be evaluated first, producing still-valid pp-tokens. Then the final concatenation can be done. */ #undef CONCAT4 #define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d)) #endif #endif /* The word size used by BFD on the host. This may be 64 with a 32 bit target if the host is 64 bit, or if other 64 bit targets have been selected with --enable-targets, or if --enable-64-bit-bfd. */ #define BFD_ARCH_SIZE 64 /* The word size of the default bfd target. */ #define BFD_DEFAULT_TARGET_SIZE 64 #define BFD_HOST_64BIT_LONG 1 #define BFD_HOST_LONG_LONG 1 #if 1 #define BFD_HOST_64_BIT long #define BFD_HOST_U_64_BIT unsigned long typedef BFD_HOST_64_BIT bfd_int64_t; typedef BFD_HOST_U_64_BIT bfd_uint64_t; #endif #if BFD_ARCH_SIZE >= 64 #define BFD64 #endif #ifndef INLINE #if __GNUC__ >= 2 #define INLINE __inline__ #else #define INLINE #endif #endif /* Forward declaration. */ typedef struct bfd bfd; /* Boolean type used in bfd. Too many systems define their own versions of "boolean" for us to safely typedef a "boolean" of our own. Using an enum for "bfd_boolean" has its own set of problems, with strange looking casts required to avoid warnings on some older compilers. Thus we just use an int. General rule: Functions which are bfd_boolean return TRUE on success and FALSE on failure (unless they're a predicate). */ typedef int bfd_boolean; #undef FALSE #undef TRUE #define FALSE 0 #define TRUE 1 #ifdef BFD64 #ifndef BFD_HOST_64_BIT #error No 64 bit integer type available #endif /* ! defined (BFD_HOST_64_BIT) */ typedef BFD_HOST_U_64_BIT bfd_vma; typedef BFD_HOST_64_BIT bfd_signed_vma; typedef BFD_HOST_U_64_BIT bfd_size_type; typedef BFD_HOST_U_64_BIT symvalue; #ifndef fprintf_vma #if BFD_HOST_64BIT_LONG #define sprintf_vma(s,x) sprintf (s, "%016lx", x) #define fprintf_vma(f,x) fprintf (f, "%016lx", x) #else #define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff))) #define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff)) #define fprintf_vma(s,x) \ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) #define sprintf_vma(s,x) \ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x)) #endif #endif #else /* not BFD64 */ /* Represent a target address. Also used as a generic unsigned type which is guaranteed to be big enough to hold any arithmetic types we need to deal with. */ typedef unsigned long bfd_vma; /* A generic signed type which is guaranteed to be big enough to hold any arithmetic types we need to deal with. 
Can be assumed to be compatible with bfd_vma in the same way that signed and unsigned ints are compatible (as parameters, in assignment, etc). */ typedef long bfd_signed_vma; typedef unsigned long symvalue; typedef unsigned long bfd_size_type; /* Print a bfd_vma x on stream s. */ #define fprintf_vma(s,x) fprintf (s, "%08lx", x) #define sprintf_vma(s,x) sprintf (s, "%08lx", x) #endif /* not BFD64 */ #define HALF_BFD_SIZE_TYPE \ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2)) #ifndef BFD_HOST_64_BIT /* Fall back on a 32 bit type. The idea is to make these types always available for function return types, but in the case that BFD_HOST_64_BIT is undefined such a function should abort or otherwise signal an error. */ typedef bfd_signed_vma bfd_int64_t; typedef bfd_vma bfd_uint64_t; #endif /* An offset into a file. BFD always uses the largest possible offset based on the build time availability of fseek, fseeko, or fseeko64. */ typedef BFD_HOST_64_BIT file_ptr; typedef unsigned BFD_HOST_64_BIT ufile_ptr; extern void bfd_sprintf_vma (bfd *, char *, bfd_vma); extern void bfd_fprintf_vma (bfd *, void *, bfd_vma); #define printf_vma(x) fprintf_vma(stdout,x) #define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x) typedef unsigned int flagword; /* 32 bits of flags */ typedef unsigned char bfd_byte; /* File formats. */ typedef enum bfd_format { bfd_unknown = 0, /* File format is unknown. */ bfd_object, /* Linker/assembler/compiler output. */ bfd_archive, /* Object archive file. */ bfd_core, /* Core dump. */ bfd_type_end /* Marks the end; don't use it! */ } bfd_format; /* Values that may appear in the flags field of a BFD. These also appear in the object_flags field of the bfd_target structure, where they indicate the set of flags used by that backend (not all flags are meaningful for all object file formats) (FIXME: at the moment, the object_flags values have mostly just been copied from backend to another, and are not necessarily correct). */ /* No flags. */ #define BFD_NO_FLAGS 0x00 /* BFD contains relocation entries. */ #define HAS_RELOC 0x01 /* BFD is directly executable. */ #define EXEC_P 0x02 /* BFD has line number information (basically used for F_LNNO in a COFF header). */ #define HAS_LINENO 0x04 /* BFD has debugging information. */ #define HAS_DEBUG 0x08 /* BFD has symbols. */ #define HAS_SYMS 0x10 /* BFD has local symbols (basically used for F_LSYMS in a COFF header). */ #define HAS_LOCALS 0x20 /* BFD is a dynamic object. */ #define DYNAMIC 0x40 /* Text section is write protected (if D_PAGED is not set, this is like an a.out NMAGIC file) (the linker sets this by default, but clears it for -r or -N). */ #define WP_TEXT 0x80 /* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the linker sets this by default, but clears it for -r or -n or -N). */ #define D_PAGED 0x100 /* BFD is relaxable (this means that bfd_relax_section may be able to do something) (sometimes bfd_relax_section can do something even if this is not set). */ #define BFD_IS_RELAXABLE 0x200 /* This may be set before writing out a BFD to request using a traditional format. For example, this is used to request that when writing out an a.out object the symbols not be hashed to eliminate duplicates. */ #define BFD_TRADITIONAL_FORMAT 0x400 /* This flag indicates that the BFD contents are actually cached in memory. If this is set, iostream points to a bfd_in_memory struct. */ #define BFD_IN_MEMORY 0x800 /* The sections in this BFD specify a memory page. 
*/ #define HAS_LOAD_PAGE 0x1000 /* This BFD has been created by the linker and doesn't correspond to any input file. */ #define BFD_LINKER_CREATED 0x2000 /* Symbols and relocation. */ /* A count of carsyms (canonical archive symbols). */ typedef unsigned long symindex; /* How to perform a relocation. */ typedef const struct reloc_howto_struct reloc_howto_type; #define BFD_NO_MORE_SYMBOLS ((symindex) ~0) /* General purpose part of a symbol X; target specific parts are in libcoff.h, libaout.h, etc. */ #define bfd_get_section(x) ((x)->section) #define bfd_get_output_section(x) ((x)->section->output_section) #define bfd_set_section(x,y) ((x)->section) = (y) #define bfd_asymbol_base(x) ((x)->section->vma) #define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value) #define bfd_asymbol_name(x) ((x)->name) /*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/ #define bfd_asymbol_bfd(x) ((x)->the_bfd) #define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour) /* A canonical archive symbol. */ /* This is a type pun with struct ranlib on purpose! */ typedef struct carsym { char *name; file_ptr file_offset; /* Look here to find the file. */ } carsym; /* To make these you call a carsymogen. */ /* Used in generating armaps (archive tables of contents). Perhaps just a forward definition would do? */ struct orl /* Output ranlib. */ { char **name; /* Symbol name. */ union { file_ptr pos; bfd *abfd; } u; /* bfd* or file position. */ int namidx; /* Index into string table. */ }; /* Linenumber stuff. */ typedef struct lineno_cache_entry { unsigned int line_number; /* Linenumber from start of function. */ union { struct bfd_symbol *sym; /* Function name. */ bfd_vma offset; /* Offset into section. */ } u; } alent; /* Object and core file sections. */ #define align_power(addr, align) \ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align))) typedef struct bfd_section *sec_ptr; #define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0) #define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0) #define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0) #define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0) #define bfd_section_name(bfd, ptr) ((ptr)->name) #define bfd_section_size(bfd, ptr) ((ptr)->size) #define bfd_get_section_size(ptr) ((ptr)->size) #define bfd_section_vma(bfd, ptr) ((ptr)->vma) #define bfd_section_lma(bfd, ptr) ((ptr)->lma) #define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power) #define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0) #define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata) #define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) #define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) #define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) #define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) /* Find the address one past the end of SEC. */ #define bfd_get_section_limit(bfd, sec) \ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \ / bfd_octets_per_byte (bfd)) typedef struct stat stat_type; typedef enum bfd_print_symbol { bfd_print_symbol_name, bfd_print_symbol_more, bfd_print_symbol_all } bfd_print_symbol_type; /* Information about a symbol that nm needs. */ typedef struct _symbol_info { symvalue value; char type; const char *name; /* Symbol name. */ unsigned char stab_type; /* Stab type. */ char stab_other; /* Stab other. */ short stab_desc; /* Stab desc. 
*/ const char *stab_name; /* String for stab type. */ } symbol_info; /* Get the name of a stabs type code. */ extern const char *bfd_get_stab_name (int); /* Hash table routines. There is no way to free up a hash table. */ /* An element in the hash table. Most uses will actually use a larger structure, and an instance of this will be the first field. */ struct bfd_hash_entry { /* Next entry for this hash code. */ struct bfd_hash_entry *next; /* String being hashed. */ const char *string; /* Hash code. This is the full hash code, not the index into the table. */ unsigned long hash; }; /* A hash table. */ struct bfd_hash_table { /* The hash array. */ struct bfd_hash_entry **table; /* The number of slots in the hash table. */ unsigned int size; /* A function used to create new elements in the hash table. The first entry is itself a pointer to an element. When this function is first invoked, this pointer will be NULL. However, having the pointer permits a hierarchy of method functions to be built each of which calls the function in the superclass. Thus each function should be written to allocate a new block of memory only if the argument is NULL. */ struct bfd_hash_entry *(*newfunc) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); /* An objalloc for this hash table. This is a struct objalloc *, but we use void * to avoid requiring the inclusion of objalloc.h. */ void *memory; }; /* Initialize a hash table. */ extern bfd_boolean bfd_hash_table_init (struct bfd_hash_table *, struct bfd_hash_entry *(*) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *)); /* Initialize a hash table specifying a size. */ extern bfd_boolean bfd_hash_table_init_n (struct bfd_hash_table *, struct bfd_hash_entry *(*) (struct bfd_hash_entry *, struct bfd_hash_table *, const char *), unsigned int size); /* Free up a hash table. */ extern void bfd_hash_table_free (struct bfd_hash_table *); /* Look up a string in a hash table. If CREATE is TRUE, a new entry will be created for this string if one does not already exist. The COPY argument must be TRUE if this routine should copy the string into newly allocated memory when adding an entry. */ extern struct bfd_hash_entry *bfd_hash_lookup (struct bfd_hash_table *, const char *, bfd_boolean create, bfd_boolean copy); /* Replace an entry in a hash table. */ extern void bfd_hash_replace (struct bfd_hash_table *, struct bfd_hash_entry *old, struct bfd_hash_entry *nw); /* Base method for creating a hash table entry. */ extern struct bfd_hash_entry *bfd_hash_newfunc (struct bfd_hash_entry *, struct bfd_hash_table *, const char *); /* Grab some space for a hash table entry. */ extern void *bfd_hash_allocate (struct bfd_hash_table *, unsigned int); /* Traverse a hash table in a random order, calling a function on each element. If the function returns FALSE, the traversal stops. The INFO argument is passed to the function. */ extern void bfd_hash_traverse (struct bfd_hash_table *, bfd_boolean (*) (struct bfd_hash_entry *, void *), void *info); /* Allows the default size of a hash table to be configured. New hash tables allocated using bfd_hash_table_init will be created with this size. */ extern void bfd_hash_set_default_size (bfd_size_type); /* This structure is used to keep track of stabs in sections information while linking. */ struct stab_info { /* A hash table used to hold stabs strings. */ struct bfd_strtab_hash *strings; /* The header file hash table. */ struct bfd_hash_table includes; /* The first .stabstr section. 
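A sketch of the usual derivation pattern for these hash routines; my_entry and my_newfunc are made-up names, and the embedded struct bfd_hash_entry must be the first field, as described above:

      struct my_entry
      {
        struct bfd_hash_entry root;
        unsigned int count;
      };

      static struct bfd_hash_entry *
      my_newfunc (struct bfd_hash_entry *entry,
                  struct bfd_hash_table *table, const char *string)
      {
        if (entry == NULL)
          entry = (struct bfd_hash_entry *)
            bfd_hash_allocate (table, sizeof (struct my_entry));
        if (entry == NULL)
          return NULL;
        entry = bfd_hash_newfunc (entry, table, string);
        if (entry != NULL)
          ((struct my_entry *) entry)->count = 0;
        return entry;
      }

      struct bfd_hash_table tab;
      if (bfd_hash_table_init (&tab, my_newfunc))
        {
          struct my_entry *e = (struct my_entry *)
            bfd_hash_lookup (&tab, "main", TRUE, TRUE);
          if (e != NULL)
            e->count++;
          bfd_hash_table_free (&tab);
        }

The lookup with create and copy both TRUE allocates a fresh my_entry through my_newfunc and copies the key string into the table's own storage.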
*/ struct bfd_section *stabstr; }; #define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table /* User program access to BFD facilities. */ /* Direct I/O routines, for programs which know more about the object file than BFD does. Use higher level routines if possible. */ extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *); extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *); extern int bfd_seek (bfd *, file_ptr, int); extern file_ptr bfd_tell (bfd *); extern int bfd_flush (bfd *); extern int bfd_stat (bfd *, struct stat *); /* Deprecated old routines. */ #if __GNUC__ #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #else #define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD))) #endif extern void warn_deprecated (const char *, const char *, int, const char *); /* Cast from const char * to char * so that caller can assign to a char * without a warning. */ #define bfd_get_filename(abfd) ((char *) (abfd)->filename) #define bfd_get_cacheable(abfd) ((abfd)->cacheable) #define bfd_get_format(abfd) ((abfd)->format) #define bfd_get_target(abfd) ((abfd)->xvec->name) #define bfd_get_flavour(abfd) ((abfd)->xvec->flavour) #define bfd_family_coff(abfd) \ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour) #define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG) #define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE) #define bfd_header_big_endian(abfd) \ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG) #define bfd_header_little_endian(abfd) \ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE) #define bfd_get_file_flags(abfd) ((abfd)->flags) #define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags) #define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags) #define bfd_my_archive(abfd) ((abfd)->my_archive) #define bfd_has_map(abfd) ((abfd)->has_armap) #define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types) #define bfd_usrdata(abfd) ((abfd)->usrdata) #define bfd_get_start_address(abfd) ((abfd)->start_address) #define bfd_get_symcount(abfd) ((abfd)->symcount) #define bfd_get_outsymbols(abfd) ((abfd)->outsymbols) #define bfd_count_sections(abfd) ((abfd)->section_count) #define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount) #define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) #define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) extern bfd_boolean bfd_cache_close (bfd *abfd); /* NB: This declaration should match the autogenerated one in libbfd.h. */ extern bfd_boolean bfd_cache_close_all (void); extern bfd_boolean bfd_record_phdr (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma, bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **); /* Byte swapping routines. 
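A small sketch of the direct I/O routines declared below, assuming abfd is already open, SEEK_SET comes from <stdio.h>, and bfd_get_size is declared later in this header:

      bfd_byte ident[16];
      if (bfd_seek (abfd, (file_ptr) 0, SEEK_SET) == 0
          && bfd_bread (ident, (bfd_size_type) sizeof ident, abfd)
             == (bfd_size_type) sizeof ident)
        printf ("%s (%s): %ld bytes on disk, first byte 0x%02x\n",
                bfd_get_filename (abfd), bfd_get_target (abfd),
                bfd_get_size (abfd), ident[0]);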
*/ bfd_uint64_t bfd_getb64 (const void *); bfd_uint64_t bfd_getl64 (const void *); bfd_int64_t bfd_getb_signed_64 (const void *); bfd_int64_t bfd_getl_signed_64 (const void *); bfd_vma bfd_getb32 (const void *); bfd_vma bfd_getl32 (const void *); bfd_signed_vma bfd_getb_signed_32 (const void *); bfd_signed_vma bfd_getl_signed_32 (const void *); bfd_vma bfd_getb16 (const void *); bfd_vma bfd_getl16 (const void *); bfd_signed_vma bfd_getb_signed_16 (const void *); bfd_signed_vma bfd_getl_signed_16 (const void *); void bfd_putb64 (bfd_uint64_t, void *); void bfd_putl64 (bfd_uint64_t, void *); void bfd_putb32 (bfd_vma, void *); void bfd_putl32 (bfd_vma, void *); void bfd_putb16 (bfd_vma, void *); void bfd_putl16 (bfd_vma, void *); /* Byte swapping routines which take size and endiannes as arguments. */ bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean); void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean); extern bfd_boolean bfd_section_already_linked_table_init (void); extern void bfd_section_already_linked_table_free (void); /* Externally visible ECOFF routines. */ #if defined(__STDC__) || defined(ALMOST_STDC) struct ecoff_debug_info; struct ecoff_debug_swap; struct ecoff_extr; struct bfd_symbol; struct bfd_link_info; struct bfd_link_hash_entry; struct bfd_elf_version_tree; #endif extern bfd_vma bfd_ecoff_get_gp_value (bfd * abfd); extern bfd_boolean bfd_ecoff_set_gp_value (bfd *abfd, bfd_vma gp_value); extern bfd_boolean bfd_ecoff_set_regmasks (bfd *abfd, unsigned long gprmask, unsigned long fprmask, unsigned long *cprmask); extern void *bfd_ecoff_debug_init (bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); extern void bfd_ecoff_debug_free (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_accumulate (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, bfd *input_bfd, struct ecoff_debug_info *input_debug, const struct ecoff_debug_swap *input_swap, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_accumulate_other (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug, const struct ecoff_debug_swap *output_swap, bfd *input_bfd, struct bfd_link_info *); extern bfd_boolean bfd_ecoff_debug_externals (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, bfd_boolean relocatable, bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *), void (*set_index) (struct bfd_symbol *, bfd_size_type)); extern bfd_boolean bfd_ecoff_debug_one_external (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, const char *name, struct ecoff_extr *esym); extern bfd_size_type bfd_ecoff_debug_size (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap); extern bfd_boolean bfd_ecoff_write_debug (bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, file_ptr where); extern bfd_boolean bfd_ecoff_write_accumulated_debug (void *handle, bfd *abfd, struct ecoff_debug_info *debug, const struct ecoff_debug_swap *swap, struct bfd_link_info *info, file_ptr where); /* Externally visible ELF routines. 
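An illustrative round trip through the byte swapping routines above; the final bfd_boolean argument of bfd_get_bits and bfd_put_bits is taken here to select big-endian when TRUE, on my reading of the declarations:

      bfd_byte buf[8];
      bfd_putl32 ((bfd_vma) 0x12345678, buf);
      if (bfd_getl32 (buf) == 0x12345678
          && bfd_getb32 (buf) == 0x78563412)
        printf ("32-bit round trip ok\n");
      bfd_put_bits ((bfd_uint64_t) 0xbeef, buf, 16, TRUE);
      printf ("16-bit big-endian field: 0x%lx\n",
              (unsigned long) bfd_get_bits (buf, 16, TRUE));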
*/ struct bfd_link_needed_list { struct bfd_link_needed_list *next; bfd *by; const char *name; }; enum dynamic_lib_link_class { DYN_NORMAL = 0, DYN_AS_NEEDED = 1, DYN_DT_NEEDED = 2, DYN_NO_ADD_NEEDED = 4, DYN_NO_NEEDED = 8 }; extern bfd_boolean bfd_elf_record_link_assignment (struct bfd_link_info *, const char *, bfd_boolean); extern struct bfd_link_needed_list *bfd_elf_get_needed_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf_get_bfd_needed_list (bfd *, struct bfd_link_needed_list **); extern bfd_boolean bfd_elf_size_dynamic_sections (bfd *, const char *, const char *, const char *, const char * const *, struct bfd_link_info *, struct bfd_section **, struct bfd_elf_version_tree *); extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr (bfd *, struct bfd_link_info *); extern void bfd_elf_set_dt_needed_name (bfd *, const char *); extern const char *bfd_elf_get_dt_soname (bfd *); extern void bfd_elf_set_dyn_lib_class (bfd *, int); extern int bfd_elf_get_dyn_lib_class (bfd *); extern struct bfd_link_needed_list *bfd_elf_get_runpath_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf_discard_info (bfd *, struct bfd_link_info *); extern unsigned int _bfd_elf_default_action_discarded (struct bfd_section *); /* Return an upper bound on the number of bytes required to store a copy of ABFD's program header table entries. Return -1 if an error occurs; bfd_get_error will return an appropriate code. */ extern long bfd_get_elf_phdr_upper_bound (bfd *abfd); /* Copy ABFD's program header table entries to *PHDRS. The entries will be stored as an array of Elf_Internal_Phdr structures, as defined in include/elf/internal.h. To find out how large the buffer needs to be, call bfd_get_elf_phdr_upper_bound. Return the number of program header table entries read, or -1 if an error occurs; bfd_get_error will return an appropriate code. */ extern int bfd_get_elf_phdrs (bfd *abfd, void *phdrs); /* Create a new BFD as if by bfd_openr. Rather than opening a file, reconstruct an ELF file by reading the segments out of remote memory based on the ELF file header at EHDR_VMA and the ELF program headers it points to. If not null, *LOADBASEP is filled in with the difference between the VMAs from which the segments were read, and the VMAs the file headers (and hence BFD's idea of each section's VMA) put them at. The function TARGET_READ_MEMORY is called to copy LEN bytes from the remote memory at target address VMA into the local buffer at MYADDR; it should return zero on success or an `errno' code on failure. TEMPL must be a BFD for an ELF target with the word size and byte order found in the remote memory. */ extern bfd *bfd_elf_bfd_from_remote_memory (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep, int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len)); /* Return the arch_size field of an elf bfd, or -1 if not elf. */ extern int bfd_get_arch_size (bfd *); /* Return TRUE if address "naturally" sign extends, or -1 if not elf. 
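A sketch of the documented two-step use of the program header routines above, assuming abfd is an ELF object and malloc/free come from <stdlib.h>:

      long bound = bfd_get_elf_phdr_upper_bound (abfd);
      if (bound > 0)
        {
          void *phdrs = malloc ((size_t) bound);
          if (phdrs != NULL)
            {
              int n = bfd_get_elf_phdrs (abfd, phdrs);
              if (n >= 0)
                printf ("%d program header table entries\n", n);
              free (phdrs);
            }
        }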
*/ extern int bfd_get_sign_extend_vma (bfd *); extern struct bfd_section *_bfd_elf_tls_setup (bfd *, struct bfd_link_info *); extern void _bfd_elf_provide_symbol (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *); extern void _bfd_elf_provide_section_bound_symbols (struct bfd_link_info *, struct bfd_section *, const char *, const char *); extern void _bfd_elf_fix_excluded_sec_syms (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); /* SunOS shared library support routines for the linker. */ extern struct bfd_link_needed_list *bfd_sunos_get_needed_list (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_sunos_record_link_assignment (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_sunos_size_dynamic_sections (bfd *, struct bfd_link_info *, struct bfd_section **, struct bfd_section **, struct bfd_section **); /* Linux shared library support routines for the linker. */ extern bfd_boolean bfd_i386linux_size_dynamic_sections (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_m68klinux_size_dynamic_sections (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_sparclinux_size_dynamic_sections (bfd *, struct bfd_link_info *); /* mmap hacks */ struct _bfd_window_internal; typedef struct _bfd_window_internal bfd_window_internal; typedef struct _bfd_window { /* What the user asked for. */ void *data; bfd_size_type size; /* The actual window used by BFD. Small user-requested read-only regions sharing a page may share a single window into the object file. Read-write versions shouldn't until I've fixed things to keep track of which portions have been claimed by the application; don't want to give the same region back when the application wants two writable copies! */ struct _bfd_window_internal *i; } bfd_window; extern void bfd_init_window (bfd_window *); extern void bfd_free_window (bfd_window *); extern bfd_boolean bfd_get_file_window (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean); /* XCOFF support routines for the linker. */ extern bfd_boolean bfd_xcoff_link_record_set (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type); extern bfd_boolean bfd_xcoff_import_symbol (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma, const char *, const char *, const char *, unsigned int); extern bfd_boolean bfd_xcoff_export_symbol (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *); extern bfd_boolean bfd_xcoff_link_count_reloc (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_xcoff_record_link_assignment (bfd *, struct bfd_link_info *, const char *); extern bfd_boolean bfd_xcoff_size_dynamic_sections (bfd *, struct bfd_link_info *, const char *, const char *, unsigned long, unsigned long, unsigned long, bfd_boolean, int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean); extern bfd_boolean bfd_xcoff_link_generate_rtinit (bfd *, const char *, const char *, bfd_boolean); /* XCOFF support routines for ar. */ extern bfd_boolean bfd_xcoff_ar_archive_set_magic (bfd *, char *); /* Externally visible COFF routines. 
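An illustrative use of the bfd_window interface above; the final FALSE requests a read-only window, on my reading of the writable flag in the declaration, and abfd is assumed open:

      bfd_window w;
      bfd_init_window (&w);
      if (bfd_get_file_window (abfd, (file_ptr) 0, (bfd_size_type) 4096,
                               &w, FALSE))
        {
          if (w.size > 0)
            printf ("first byte of file: 0x%02x\n",
                    ((bfd_byte *) w.data)[0]);
          bfd_free_window (&w);
        }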
*/ #if defined(__STDC__) || defined(ALMOST_STDC) struct internal_syment; union internal_auxent; #endif extern bfd_boolean bfd_coff_get_syment (bfd *, struct bfd_symbol *, struct internal_syment *); extern bfd_boolean bfd_coff_get_auxent (bfd *, struct bfd_symbol *, int, union internal_auxent *); extern bfd_boolean bfd_coff_set_symbol_class (bfd *, struct bfd_symbol *, unsigned int); extern bfd_boolean bfd_m68k_coff_create_embedded_relocs (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **); /* ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_arm_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_arm_process_before_allocation (bfd *, struct bfd_link_info *, int); extern bfd_boolean bfd_arm_get_bfd_for_interworking (bfd *, struct bfd_link_info *); /* PE ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_arm_pe_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_arm_pe_process_before_allocation (bfd *, struct bfd_link_info *, int); extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking (bfd *, struct bfd_link_info *); /* ELF ARM Interworking support. Called from linker. */ extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info *); extern bfd_boolean bfd_elf32_arm_process_before_allocation (bfd *, struct bfd_link_info *, int); void bfd_elf32_arm_set_target_relocs (struct bfd_link_info *, int, char *, int, int); extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking (bfd *, struct bfd_link_info *); extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd (bfd *, struct bfd_link_info *); /* ELF ARM mapping symbol support */ extern bfd_boolean bfd_is_arm_mapping_symbol_name (const char * name); /* ARM Note section processing. */ extern bfd_boolean bfd_arm_merge_machines (bfd *, bfd *); extern bfd_boolean bfd_arm_update_notes (bfd *, const char *); extern unsigned int bfd_arm_get_mach_from_notes (bfd *, const char *); /* TI COFF load page support. */ extern void bfd_ticoff_set_section_load_page (struct bfd_section *, int); extern int bfd_ticoff_get_section_load_page (struct bfd_section *); /* H8/300 functions. */ extern bfd_vma bfd_h8300_pad_address (bfd *, bfd_vma); /* IA64 Itanium code generation. Called from linker. */ extern void bfd_elf32_ia64_after_parse (int); extern void bfd_elf64_ia64_after_parse (int); /* This structure is used for a comdat section, as in PE. A comdat section is associated with a particular symbol. When the linker sees a comdat section, it keeps only one of the sections with a given name and associated with a given symbol. */ struct coff_comdat_info { /* The name of the symbol associated with a comdat section. */ const char *name; /* The local symbol table index of the symbol associated with a comdat section. This is only meaningful to the object file format specific code; it is not an index into the list returned by bfd_canonicalize_symtab. */ long symbol; }; extern struct coff_comdat_info *bfd_coff_get_comdat_section (bfd *, struct bfd_section *); /* Extracted from init.c. */ void bfd_init (void); /* Extracted from opncls.c. 
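A small sketch of the comdat query above, only meaningful for COFF/PE objects; abfd and sec are assumed to be an open bfd and one of its sections:

      struct coff_comdat_info *ci = bfd_coff_get_comdat_section (abfd, sec);
      if (ci != NULL)
        printf ("%s is a comdat section keyed on symbol %s\n",
                bfd_get_section_name (abfd, sec), ci->name);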
*/ bfd *bfd_fopen (const char *filename, const char *target, const char *mode, int fd); bfd *bfd_openr (const char *filename, const char *target); bfd *bfd_fdopenr (const char *filename, const char *target, int fd); bfd *bfd_openstreamr (const char *, const char *, void *); bfd *bfd_openr_iovec (const char *filename, const char *target, void *(*open) (struct bfd *nbfd, void *open_closure), void *open_closure, file_ptr (*pread) (struct bfd *nbfd, void *stream, void *buf, file_ptr nbytes, file_ptr offset), int (*close) (struct bfd *nbfd, void *stream)); bfd *bfd_openw (const char *filename, const char *target); bfd_boolean bfd_close (bfd *abfd); bfd_boolean bfd_close_all_done (bfd *); bfd *bfd_create (const char *filename, bfd *templ); bfd_boolean bfd_make_writable (bfd *abfd); bfd_boolean bfd_make_readable (bfd *abfd); unsigned long bfd_calc_gnu_debuglink_crc32 (unsigned long crc, const unsigned char *buf, bfd_size_type len); char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir); struct bfd_section *bfd_create_gnu_debuglink_section (bfd *abfd, const char *filename); bfd_boolean bfd_fill_in_gnu_debuglink_section (bfd *abfd, struct bfd_section *sect, const char *filename); /* Extracted from libbfd.c. */ /* Byte swapping macros for user section data. */ #define bfd_put_8(abfd, val, ptr) \ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff)) #define bfd_put_signed_8 \ bfd_put_8 #define bfd_get_8(abfd, ptr) \ (*(unsigned char *) (ptr) & 0xff) #define bfd_get_signed_8(abfd, ptr) \ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80) #define bfd_put_16(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx16, ((val),(ptr))) #define bfd_put_signed_16 \ bfd_put_16 #define bfd_get_16(abfd, ptr) \ BFD_SEND (abfd, bfd_getx16, (ptr)) #define bfd_get_signed_16(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_16, (ptr)) #define bfd_put_32(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx32, ((val),(ptr))) #define bfd_put_signed_32 \ bfd_put_32 #define bfd_get_32(abfd, ptr) \ BFD_SEND (abfd, bfd_getx32, (ptr)) #define bfd_get_signed_32(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_32, (ptr)) #define bfd_put_64(abfd, val, ptr) \ BFD_SEND (abfd, bfd_putx64, ((val), (ptr))) #define bfd_put_signed_64 \ bfd_put_64 #define bfd_get_64(abfd, ptr) \ BFD_SEND (abfd, bfd_getx64, (ptr)) #define bfd_get_signed_64(abfd, ptr) \ BFD_SEND (abfd, bfd_getx_signed_64, (ptr)) #define bfd_get(bits, abfd, ptr) \ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \ : (abort (), (bfd_vma) - 1)) #define bfd_put(bits, abfd, val, ptr) \ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \ : (abort (), (void) 0)) /* Byte swapping macros for file header data. 
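A sketch of the usual open, identify and close cycle built from the routines above; "/bin/ls" is only a placeholder path, a NULL target lets BFD pick a default, and bfd_check_format is declared in the format.c portion of this header:

      bfd *ibfd;
      bfd_init ();
      ibfd = bfd_openr ("/bin/ls", NULL);
      if (ibfd != NULL)
        {
          if (bfd_check_format (ibfd, bfd_object))
            printf ("%s uses target %s\n",
                    bfd_get_filename (ibfd), bfd_get_target (ibfd));
          bfd_close (ibfd);
        }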
*/ #define bfd_h_put_8(abfd, val, ptr) \ bfd_put_8 (abfd, val, ptr) #define bfd_h_put_signed_8(abfd, val, ptr) \ bfd_put_8 (abfd, val, ptr) #define bfd_h_get_8(abfd, ptr) \ bfd_get_8 (abfd, ptr) #define bfd_h_get_signed_8(abfd, ptr) \ bfd_get_signed_8 (abfd, ptr) #define bfd_h_put_16(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx16, (val, ptr)) #define bfd_h_put_signed_16 \ bfd_h_put_16 #define bfd_h_get_16(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx16, (ptr)) #define bfd_h_get_signed_16(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr)) #define bfd_h_put_32(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx32, (val, ptr)) #define bfd_h_put_signed_32 \ bfd_h_put_32 #define bfd_h_get_32(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx32, (ptr)) #define bfd_h_get_signed_32(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr)) #define bfd_h_put_64(abfd, val, ptr) \ BFD_SEND (abfd, bfd_h_putx64, (val, ptr)) #define bfd_h_put_signed_64 \ bfd_h_put_64 #define bfd_h_get_64(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx64, (ptr)) #define bfd_h_get_signed_64(abfd, ptr) \ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr)) /* Aliases for the above, which should eventually go away. */ #define H_PUT_64 bfd_h_put_64 #define H_PUT_32 bfd_h_put_32 #define H_PUT_16 bfd_h_put_16 #define H_PUT_8 bfd_h_put_8 #define H_PUT_S64 bfd_h_put_signed_64 #define H_PUT_S32 bfd_h_put_signed_32 #define H_PUT_S16 bfd_h_put_signed_16 #define H_PUT_S8 bfd_h_put_signed_8 #define H_GET_64 bfd_h_get_64 #define H_GET_32 bfd_h_get_32 #define H_GET_16 bfd_h_get_16 #define H_GET_8 bfd_h_get_8 #define H_GET_S64 bfd_h_get_signed_64 #define H_GET_S32 bfd_h_get_signed_32 #define H_GET_S16 bfd_h_get_signed_16 #define H_GET_S8 bfd_h_get_signed_8 /* Extracted from bfdio.c. */ long bfd_get_mtime (bfd *abfd); long bfd_get_size (bfd *abfd); /* Extracted from bfdwin.c. */ /* Extracted from section.c. */ typedef struct bfd_section { /* The name of the section; the name isn't a copy, the pointer is the same as that passed to bfd_make_section. */ const char *name; /* A unique sequence number. */ int id; /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */ int index; /* The next section in the list belonging to the BFD, or NULL. */ struct bfd_section *next; /* The previous section in the list belonging to the BFD, or NULL. */ struct bfd_section *prev; /* The field flags contains attributes of the section. Some flags are read in from the object file, and some are synthesized from other information. */ flagword flags; #define SEC_NO_FLAGS 0x000 /* Tells the OS to allocate space for this section when loading. This is clear for a section containing debug information only. */ #define SEC_ALLOC 0x001 /* Tells the OS to load the section from the file when loading. This is clear for a .bss section. */ #define SEC_LOAD 0x002 /* The section contains data still to be relocated, so there is some relocation information too. */ #define SEC_RELOC 0x004 /* A signal to the OS that the section contains read only data. */ #define SEC_READONLY 0x008 /* The section contains code only. */ #define SEC_CODE 0x010 /* The section contains data only. */ #define SEC_DATA 0x020 /* The section will reside in ROM. */ #define SEC_ROM 0x040 /* The section contains constructor information. This section type is used by the linker to create lists of constructors and destructors used by <>. 
When a back end sees a symbol which should be used in a constructor list, it creates a new section for the type of name (e.g., <<__CTOR_LIST__>>), attaches the symbol to it, and builds a relocation. To build the lists of constructors, all the linker has to do is catenate all the sections called <<__CTOR_LIST__>> and relocate the data contained within - exactly the operations it would peform on standard data. */ #define SEC_CONSTRUCTOR 0x080 /* The section has contents - a data section could be <> | <>; a debug section could be <> */ #define SEC_HAS_CONTENTS 0x100 /* An instruction to the linker to not output the section even if it has information which would normally be written. */ #define SEC_NEVER_LOAD 0x200 /* The section contains thread local data. */ #define SEC_THREAD_LOCAL 0x400 /* The section has GOT references. This flag is only for the linker, and is currently only used by the elf32-hppa back end. It will be set if global offset table references were detected in this section, which indicate to the linker that the section contains PIC code, and must be handled specially when doing a static link. */ #define SEC_HAS_GOT_REF 0x800 /* The section contains common symbols (symbols may be defined multiple times, the value of a symbol is the amount of space it requires, and the largest symbol value is the one used). Most targets have exactly one of these (which we translate to bfd_com_section_ptr), but ECOFF has two. */ #define SEC_IS_COMMON 0x1000 /* The section contains only debugging information. For example, this is set for ELF .debug and .stab sections. strip tests this flag to see if a section can be discarded. */ #define SEC_DEBUGGING 0x2000 /* The contents of this section are held in memory pointed to by the contents field. This is checked by bfd_get_section_contents, and the data is retrieved from memory if appropriate. */ #define SEC_IN_MEMORY 0x4000 /* The contents of this section are to be excluded by the linker for executable and shared objects unless those objects are to be further relocated. */ #define SEC_EXCLUDE 0x8000 /* The contents of this section are to be sorted based on the sum of the symbol and addend values specified by the associated relocation entries. Entries without associated relocation entries will be appended to the end of the section in an unspecified order. */ #define SEC_SORT_ENTRIES 0x10000 /* When linking, duplicate sections of the same name should be discarded, rather than being combined into a single section as is usually done. This is similar to how common symbols are handled. See SEC_LINK_DUPLICATES below. */ #define SEC_LINK_ONCE 0x20000 /* If SEC_LINK_ONCE is set, this bitfield describes how the linker should handle duplicate sections. */ #define SEC_LINK_DUPLICATES 0x40000 /* This value for SEC_LINK_DUPLICATES means that duplicate sections with the same name should simply be discarded. */ #define SEC_LINK_DUPLICATES_DISCARD 0x0 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if there are any duplicate sections, although it should still only link one copy. */ #define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if any duplicate sections are a different size. */ #define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000 /* This value for SEC_LINK_DUPLICATES means that the linker should warn if any duplicate sections contain different contents. 
*/ #define SEC_LINK_DUPLICATES_SAME_CONTENTS \ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE) /* This section was created by the linker as part of dynamic relocation or other arcane processing. It is skipped when going through the first-pass output, trusting that someone else up the line will take care of it later. */ #define SEC_LINKER_CREATED 0x200000 /* This section should not be subject to garbage collection. */ #define SEC_KEEP 0x400000 /* This section contains "short" data, and should be placed "near" the GP. */ #define SEC_SMALL_DATA 0x800000 /* Attempt to merge identical entities in the section. Entity size is given in the entsize field. */ #define SEC_MERGE 0x1000000 /* If given with SEC_MERGE, entities to merge are zero terminated strings where entsize specifies character size instead of fixed size entries. */ #define SEC_STRINGS 0x2000000 /* This section contains data about section groups. */ #define SEC_GROUP 0x4000000 /* The section is a COFF shared library section. This flag is only for the linker. If this type of section appears in the input file, the linker must copy it to the output file without changing the vma or size. FIXME: Although this was originally intended to be general, it really is COFF specific (and the flag was renamed to indicate this). It might be cleaner to have some more general mechanism to allow the back end to control what the linker does with sections. */ #define SEC_COFF_SHARED_LIBRARY 0x10000000 /* This section contains data which may be shared with other executables or shared objects. This is for COFF only. */ #define SEC_COFF_SHARED 0x20000000 /* When a section with this flag is being linked, then if the size of the input section is less than a page, it should not cross a page boundary. If the size of the input section is one page or more, it should be aligned on a page boundary. This is for TI TMS320C54X only. */ #define SEC_TIC54X_BLOCK 0x40000000 /* Conditionally link this section; do not link if there are no references found to any symbol in the section. This is for TI TMS320C54X only. */ #define SEC_TIC54X_CLINK 0x80000000 /* End of section flags. */ /* Some internal packed boolean fields. */ /* See the vma field. */ unsigned int user_set_vma : 1; /* A mark flag used by some of the linker backends. */ unsigned int linker_mark : 1; /* Another mark flag used by some of the linker backends. Set for output sections that have an input section. */ unsigned int linker_has_input : 1; /* Mark flags used by some linker backends for garbage collection. */ unsigned int gc_mark : 1; unsigned int gc_mark_from_eh : 1; /* The following flags are used by the ELF linker. */ /* Mark sections which have been allocated to segments. */ unsigned int segment_mark : 1; /* Type of sec_info information. */ unsigned int sec_info_type:3; #define ELF_INFO_TYPE_NONE 0 #define ELF_INFO_TYPE_STABS 1 #define ELF_INFO_TYPE_MERGE 2 #define ELF_INFO_TYPE_EH_FRAME 3 #define ELF_INFO_TYPE_JUST_SYMS 4 /* Nonzero if this section uses RELA relocations, rather than REL. */ unsigned int use_rela_p:1; /* Bits used by various backends. The generic code doesn't touch these fields. */ /* Nonzero if this section has TLS related relocations. */ unsigned int has_tls_reloc:1; /* Nonzero if this section has a gp reloc. */ unsigned int has_gp_reloc:1; /* Nonzero if this section needs the relax finalize pass. */ unsigned int need_finalize_relax:1; /* Whether relocations have been processed. */ unsigned int reloc_done : 1; /* End of internal packed boolean fields. 
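A compressed sketch of creating a loadable data section with the flags above; obfd is assumed to be a bfd opened for writing with its format and architecture already set, and the creation and content routines used here are declared further down in this header:

      static const char payload[] = "hello";
      asection *s = bfd_make_section_with_flags (obfd, ".mydata",
                                                 SEC_ALLOC | SEC_LOAD
                                                 | SEC_DATA
                                                 | SEC_HAS_CONTENTS);
      if (s != NULL
          && bfd_set_section_size (obfd, s, sizeof payload)
          && bfd_set_section_contents (obfd, s, payload, (file_ptr) 0,
                                       sizeof payload))
        printf ("added %s\n", bfd_get_section_name (obfd, s));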
*/ /* The virtual memory address of the section - where it will be at run time. The symbols are relocated against this. The user_set_vma flag is maintained by bfd; if it's not set, the backend can assign addresses (for example, in <>, where the default address for <<.data>> is dependent on the specific target and various flags). */ bfd_vma vma; /* The load address of the section - where it would be in a rom image; really only used for writing section header information. */ bfd_vma lma; /* The size of the section in octets, as it will be output. Contains a value even if the section has no contents (e.g., the size of <<.bss>>). */ bfd_size_type size; /* For input sections, the original size on disk of the section, in octets. This field is used by the linker relaxation code. It is currently only set for sections where the linker relaxation scheme doesn't cache altered section and reloc contents (stabs, eh_frame, SEC_MERGE, some coff relaxing targets), and thus the original size needs to be kept to read the section multiple times. For output sections, rawsize holds the section size calculated on a previous linker relaxation pass. */ bfd_size_type rawsize; /* If this section is going to be output, then this value is the offset in *bytes* into the output section of the first byte in the input section (byte ==> smallest addressable unit on the target). In most cases, if this was going to start at the 100th octet (8-bit quantity) in the output section, this value would be 100. However, if the target byte size is 16 bits (bfd_octets_per_byte is "2"), this value would be 50. */ bfd_vma output_offset; /* The output section through which to map on output. */ struct bfd_section *output_section; /* The alignment requirement of the section, as an exponent of 2 - e.g., 3 aligns to 2^3 (or 8). */ unsigned int alignment_power; /* If an input section, a pointer to a vector of relocation records for the data in this section. */ struct reloc_cache_entry *relocation; /* If an output section, a pointer to a vector of pointers to relocation records for the data in this section. */ struct reloc_cache_entry **orelocation; /* The number of relocation records in one of the above. */ unsigned reloc_count; /* Information below is back end specific - and not always used or updated. */ /* File position of section data. */ file_ptr filepos; /* File position of relocation info. */ file_ptr rel_filepos; /* File position of line data. */ file_ptr line_filepos; /* Pointer to data for applications. */ void *userdata; /* If the SEC_IN_MEMORY flag is set, this points to the actual contents. */ unsigned char *contents; /* Attached line number information. */ alent *lineno; /* Number of line number records. */ unsigned int lineno_count; /* Entity size for merging purposes. */ unsigned int entsize; /* Points to the kept section if this section is a link-once section, and is discarded. */ struct bfd_section *kept_section; /* When a section is being output, this value changes as more linenumbers are written out. */ file_ptr moving_line_filepos; /* What the section number is in the target world. */ int target_index; void *used_by_bfd; /* If this is a constructor section then here is a list of the relocations created to relocate items within it. */ struct relent_chain *constructor_chain; /* The BFD which owns the section. */ bfd *owner; /* A symbol which points at this section only. 
*/ struct bfd_symbol *symbol; struct bfd_symbol **symbol_ptr_ptr; /* Early in the link process, map_head and map_tail are used to build a list of input sections attached to an output section. Later, output sections use these fields for a list of bfd_link_order structs. */ union { struct bfd_link_order *link_order; struct bfd_section *s; } map_head, map_tail; } asection; /* These sections are global, and are managed by BFD. The application and target back end are not permitted to change the values in these sections. New code should use the section_ptr macros rather than referring directly to the const sections. The const sections may eventually vanish. */ #define BFD_ABS_SECTION_NAME "*ABS*" #define BFD_UND_SECTION_NAME "*UND*" #define BFD_COM_SECTION_NAME "*COM*" #define BFD_IND_SECTION_NAME "*IND*" /* The absolute section. */ extern asection bfd_abs_section; #define bfd_abs_section_ptr ((asection *) &bfd_abs_section) #define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr) /* Pointer to the undefined section. */ extern asection bfd_und_section; #define bfd_und_section_ptr ((asection *) &bfd_und_section) #define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr) /* Pointer to the common section. */ extern asection bfd_com_section; #define bfd_com_section_ptr ((asection *) &bfd_com_section) /* Pointer to the indirect section. */ extern asection bfd_ind_section; #define bfd_ind_section_ptr ((asection *) &bfd_ind_section) #define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr) #define bfd_is_const_section(SEC) \ ( ((SEC) == bfd_abs_section_ptr) \ || ((SEC) == bfd_und_section_ptr) \ || ((SEC) == bfd_com_section_ptr) \ || ((SEC) == bfd_ind_section_ptr)) extern const struct bfd_symbol * const bfd_abs_symbol; extern const struct bfd_symbol * const bfd_com_symbol; extern const struct bfd_symbol * const bfd_und_symbol; extern const struct bfd_symbol * const bfd_ind_symbol; /* Macros to handle insertion and deletion of a bfd's sections. These only handle the list pointers, ie. do not adjust section_count, target_index etc. 
*/ #define bfd_section_list_remove(ABFD, S) \ do \ { \ asection *_s = S; \ asection *_next = _s->next; \ asection *_prev = _s->prev; \ if (_prev) \ _prev->next = _next; \ else \ (ABFD)->sections = _next; \ if (_next) \ _next->prev = _prev; \ else \ (ABFD)->section_last = _prev; \ } \ while (0) #define bfd_section_list_append(ABFD, S) \ do \ { \ asection *_s = S; \ bfd *_abfd = ABFD; \ _s->next = NULL; \ if (_abfd->section_last) \ { \ _s->prev = _abfd->section_last; \ _abfd->section_last->next = _s; \ } \ else \ { \ _s->prev = NULL; \ _abfd->sections = _s; \ } \ _abfd->section_last = _s; \ } \ while (0) #define bfd_section_list_prepend(ABFD, S) \ do \ { \ asection *_s = S; \ bfd *_abfd = ABFD; \ _s->prev = NULL; \ if (_abfd->sections) \ { \ _s->next = _abfd->sections; \ _abfd->sections->prev = _s; \ } \ else \ { \ _s->next = NULL; \ _abfd->section_last = _s; \ } \ _abfd->sections = _s; \ } \ while (0) #define bfd_section_list_insert_after(ABFD, A, S) \ do \ { \ asection *_a = A; \ asection *_s = S; \ asection *_next = _a->next; \ _s->next = _next; \ _s->prev = _a; \ _a->next = _s; \ if (_next) \ _next->prev = _s; \ else \ (ABFD)->section_last = _s; \ } \ while (0) #define bfd_section_list_insert_before(ABFD, B, S) \ do \ { \ asection *_b = B; \ asection *_s = S; \ asection *_prev = _b->prev; \ _s->prev = _prev; \ _s->next = _b; \ _b->prev = _s; \ if (_prev) \ _prev->next = _s; \ else \ (ABFD)->sections = _s; \ } \ while (0) #define bfd_section_removed_from_list(ABFD, S) \ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S)) void bfd_section_list_clear (bfd *); asection *bfd_get_section_by_name (bfd *abfd, const char *name); asection *bfd_get_section_by_name_if (bfd *abfd, const char *name, bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj), void *obj); char *bfd_get_unique_section_name (bfd *abfd, const char *templat, int *count); asection *bfd_make_section_old_way (bfd *abfd, const char *name); asection *bfd_make_section_anyway_with_flags (bfd *abfd, const char *name, flagword flags); asection *bfd_make_section_anyway (bfd *abfd, const char *name); asection *bfd_make_section_with_flags (bfd *, const char *name, flagword flags); asection *bfd_make_section (bfd *, const char *name); bfd_boolean bfd_set_section_flags (bfd *abfd, asection *sec, flagword flags); void bfd_map_over_sections (bfd *abfd, void (*func) (bfd *abfd, asection *sect, void *obj), void *obj); asection *bfd_sections_find_if (bfd *abfd, bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj), void *obj); bfd_boolean bfd_set_section_size (bfd *abfd, asection *sec, bfd_size_type val); bfd_boolean bfd_set_section_contents (bfd *abfd, asection *section, const void *data, file_ptr offset, bfd_size_type count); bfd_boolean bfd_get_section_contents (bfd *abfd, asection *section, void *location, file_ptr offset, bfd_size_type count); bfd_boolean bfd_malloc_and_get_section (bfd *abfd, asection *section, bfd_byte **buf); bfd_boolean bfd_copy_private_section_data (bfd *ibfd, asection *isec, bfd *obfd, asection *osec); #define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \ BFD_SEND (obfd, _bfd_copy_private_section_data, \ (ibfd, isection, obfd, osection)) bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec); bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group); /* Extracted from archures.c. */ enum bfd_architecture { bfd_arch_unknown, /* File arch not known. */ bfd_arch_obscure, /* Arch known, not one of these. 
*/ bfd_arch_m68k, /* Motorola 68xxx */ #define bfd_mach_m68000 1 #define bfd_mach_m68008 2 #define bfd_mach_m68010 3 #define bfd_mach_m68020 4 #define bfd_mach_m68030 5 #define bfd_mach_m68040 6 #define bfd_mach_m68060 7 #define bfd_mach_cpu32 8 #define bfd_mach_mcf5200 9 #define bfd_mach_mcf5206e 10 #define bfd_mach_mcf5307 11 #define bfd_mach_mcf5407 12 #define bfd_mach_mcf528x 13 #define bfd_mach_mcfv4e 14 #define bfd_mach_mcf521x 15 #define bfd_mach_mcf5249 16 #define bfd_mach_mcf547x 17 #define bfd_mach_mcf548x 18 bfd_arch_vax, /* DEC Vax */ bfd_arch_i960, /* Intel 960 */ /* The order of the following is important. lower number indicates a machine type that only accepts a subset of the instructions available to machines with higher numbers. The exception is the "ca", which is incompatible with all other machines except "core". */ #define bfd_mach_i960_core 1 #define bfd_mach_i960_ka_sa 2 #define bfd_mach_i960_kb_sb 3 #define bfd_mach_i960_mc 4 #define bfd_mach_i960_xa 5 #define bfd_mach_i960_ca 6 #define bfd_mach_i960_jx 7 #define bfd_mach_i960_hx 8 bfd_arch_or32, /* OpenRISC 32 */ bfd_arch_a29k, /* AMD 29000 */ bfd_arch_sparc, /* SPARC */ #define bfd_mach_sparc 1 /* The difference between v8plus and v9 is that v9 is a true 64 bit env. */ #define bfd_mach_sparc_sparclet 2 #define bfd_mach_sparc_sparclite 3 #define bfd_mach_sparc_v8plus 4 #define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */ #define bfd_mach_sparc_sparclite_le 6 #define bfd_mach_sparc_v9 7 #define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */ #define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */ #define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */ /* Nonzero if MACH has the v9 instruction set. */ #define bfd_mach_sparc_v9_p(mach) \ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \ && (mach) != bfd_mach_sparc_sparclite_le) /* Nonzero if MACH is a 64 bit sparc architecture. 
*/ #define bfd_mach_sparc_64bit_p(mach) \ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb) bfd_arch_mips, /* MIPS Rxxxx */ #define bfd_mach_mips3000 3000 #define bfd_mach_mips3900 3900 #define bfd_mach_mips4000 4000 #define bfd_mach_mips4010 4010 #define bfd_mach_mips4100 4100 #define bfd_mach_mips4111 4111 #define bfd_mach_mips4120 4120 #define bfd_mach_mips4300 4300 #define bfd_mach_mips4400 4400 #define bfd_mach_mips4600 4600 #define bfd_mach_mips4650 4650 #define bfd_mach_mips5000 5000 #define bfd_mach_mips5400 5400 #define bfd_mach_mips5500 5500 #define bfd_mach_mips6000 6000 #define bfd_mach_mips7000 7000 #define bfd_mach_mips8000 8000 #define bfd_mach_mips9000 9000 #define bfd_mach_mips10000 10000 #define bfd_mach_mips12000 12000 #define bfd_mach_mips16 16 #define bfd_mach_mips5 5 #define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */ #define bfd_mach_mipsisa32 32 #define bfd_mach_mipsisa32r2 33 #define bfd_mach_mipsisa64 64 #define bfd_mach_mipsisa64r2 65 bfd_arch_i386, /* Intel 386 */ #define bfd_mach_i386_i386 1 #define bfd_mach_i386_i8086 2 #define bfd_mach_i386_i386_intel_syntax 3 #define bfd_mach_x86_64 64 #define bfd_mach_x86_64_intel_syntax 65 bfd_arch_we32k, /* AT&T WE32xxx */ bfd_arch_tahoe, /* CCI/Harris Tahoe */ bfd_arch_i860, /* Intel 860 */ bfd_arch_i370, /* IBM 360/370 Mainframes */ bfd_arch_romp, /* IBM ROMP PC/RT */ bfd_arch_alliant, /* Alliant */ bfd_arch_convex, /* Convex */ bfd_arch_m88k, /* Motorola 88xxx */ bfd_arch_m98k, /* Motorola 98xxx */ bfd_arch_pyramid, /* Pyramid Technology */ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */ #define bfd_mach_h8300 1 #define bfd_mach_h8300h 2 #define bfd_mach_h8300s 3 #define bfd_mach_h8300hn 4 #define bfd_mach_h8300sn 5 #define bfd_mach_h8300sx 6 #define bfd_mach_h8300sxn 7 bfd_arch_pdp11, /* DEC PDP-11 */ bfd_arch_powerpc, /* PowerPC */ #define bfd_mach_ppc 32 #define bfd_mach_ppc64 64 #define bfd_mach_ppc_403 403 #define bfd_mach_ppc_403gc 4030 #define bfd_mach_ppc_505 505 #define bfd_mach_ppc_601 601 #define bfd_mach_ppc_602 602 #define bfd_mach_ppc_603 603 #define bfd_mach_ppc_ec603e 6031 #define bfd_mach_ppc_604 604 #define bfd_mach_ppc_620 620 #define bfd_mach_ppc_630 630 #define bfd_mach_ppc_750 750 #define bfd_mach_ppc_860 860 #define bfd_mach_ppc_a35 35 #define bfd_mach_ppc_rs64ii 642 #define bfd_mach_ppc_rs64iii 643 #define bfd_mach_ppc_7400 7400 #define bfd_mach_ppc_e500 500 bfd_arch_rs6000, /* IBM RS/6000 */ #define bfd_mach_rs6k 6000 #define bfd_mach_rs6k_rs1 6001 #define bfd_mach_rs6k_rsc 6003 #define bfd_mach_rs6k_rs2 6002 bfd_arch_hppa, /* HP PA RISC */ #define bfd_mach_hppa10 10 #define bfd_mach_hppa11 11 #define bfd_mach_hppa20 20 #define bfd_mach_hppa20w 25 bfd_arch_d10v, /* Mitsubishi D10V */ #define bfd_mach_d10v 1 #define bfd_mach_d10v_ts2 2 #define bfd_mach_d10v_ts3 3 bfd_arch_d30v, /* Mitsubishi D30V */ bfd_arch_dlx, /* DLX */ bfd_arch_m68hc11, /* Motorola 68HC11 */ bfd_arch_m68hc12, /* Motorola 68HC12 */ #define bfd_mach_m6812_default 0 #define bfd_mach_m6812 1 #define bfd_mach_m6812s 2 bfd_arch_z8k, /* Zilog Z8000 */ #define bfd_mach_z8001 1 #define bfd_mach_z8002 2 bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */ #define bfd_mach_sh 1 #define bfd_mach_sh2 0x20 #define bfd_mach_sh_dsp 0x2d #define bfd_mach_sh2a 0x2a #define bfd_mach_sh2a_nofpu 0x2b #define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1 #define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2 #define bfd_mach_sh2a_or_sh4 0x2a3 #define 
bfd_mach_sh2a_or_sh3e 0x2a4 #define bfd_mach_sh2e 0x2e #define bfd_mach_sh3 0x30 #define bfd_mach_sh3_nommu 0x31 #define bfd_mach_sh3_dsp 0x3d #define bfd_mach_sh3e 0x3e #define bfd_mach_sh4 0x40 #define bfd_mach_sh4_nofpu 0x41 #define bfd_mach_sh4_nommu_nofpu 0x42 #define bfd_mach_sh4a 0x4a #define bfd_mach_sh4a_nofpu 0x4b #define bfd_mach_sh4al_dsp 0x4d #define bfd_mach_sh5 0x50 bfd_arch_alpha, /* Dec Alpha */ #define bfd_mach_alpha_ev4 0x10 #define bfd_mach_alpha_ev5 0x20 #define bfd_mach_alpha_ev6 0x30 bfd_arch_arm, /* Advanced Risc Machines ARM. */ #define bfd_mach_arm_unknown 0 #define bfd_mach_arm_2 1 #define bfd_mach_arm_2a 2 #define bfd_mach_arm_3 3 #define bfd_mach_arm_3M 4 #define bfd_mach_arm_4 5 #define bfd_mach_arm_4T 6 #define bfd_mach_arm_5 7 #define bfd_mach_arm_5T 8 #define bfd_mach_arm_5TE 9 #define bfd_mach_arm_XScale 10 #define bfd_mach_arm_ep9312 11 #define bfd_mach_arm_iWMMXt 12 bfd_arch_ns32k, /* National Semiconductors ns32000 */ bfd_arch_w65, /* WDC 65816 */ bfd_arch_tic30, /* Texas Instruments TMS320C30 */ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */ #define bfd_mach_tic3x 30 #define bfd_mach_tic4x 40 bfd_arch_tic54x, /* Texas Instruments TMS320C54X */ bfd_arch_tic80, /* TI TMS320c80 (MVP) */ bfd_arch_v850, /* NEC V850 */ #define bfd_mach_v850 1 #define bfd_mach_v850e 'E' #define bfd_mach_v850e1 '1' bfd_arch_arc, /* ARC Cores */ #define bfd_mach_arc_5 5 #define bfd_mach_arc_6 6 #define bfd_mach_arc_7 7 #define bfd_mach_arc_8 8 bfd_arch_m32c, /* Renesas M16C/M32C. */ #define bfd_mach_m16c 0x75 #define bfd_mach_m32c 0x78 bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */ #define bfd_mach_m32r 1 /* For backwards compatibility. */ #define bfd_mach_m32rx 'x' #define bfd_mach_m32r2 '2' bfd_arch_mn10200, /* Matsushita MN10200 */ bfd_arch_mn10300, /* Matsushita MN10300 */ #define bfd_mach_mn10300 300 #define bfd_mach_am33 330 #define bfd_mach_am33_2 332 bfd_arch_fr30, #define bfd_mach_fr30 0x46523330 bfd_arch_frv, #define bfd_mach_frv 1 #define bfd_mach_frvsimple 2 #define bfd_mach_fr300 300 #define bfd_mach_fr400 400 #define bfd_mach_fr450 450 #define bfd_mach_frvtomcat 499 /* fr500 prototype */ #define bfd_mach_fr500 500 #define bfd_mach_fr550 550 bfd_arch_mcore, bfd_arch_ia64, /* HP/Intel ia64 */ #define bfd_mach_ia64_elf64 64 #define bfd_mach_ia64_elf32 32 bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */ #define bfd_mach_ip2022 1 #define bfd_mach_ip2022ext 2 bfd_arch_iq2000, /* Vitesse IQ2000. */ #define bfd_mach_iq2000 1 #define bfd_mach_iq10 2 bfd_arch_ms1, #define bfd_mach_ms1 1 #define bfd_mach_mrisc2 2 bfd_arch_pj, bfd_arch_avr, /* Atmel AVR microcontrollers. */ #define bfd_mach_avr1 1 #define bfd_mach_avr2 2 #define bfd_mach_avr3 3 #define bfd_mach_avr4 4 #define bfd_mach_avr5 5 bfd_arch_cr16c, /* National Semiconductor CompactRISC. */ #define bfd_mach_cr16c 1 bfd_arch_crx, /* National Semiconductor CRX. */ #define bfd_mach_crx 1 bfd_arch_cris, /* Axis CRIS */ #define bfd_mach_cris_v0_v10 255 #define bfd_mach_cris_v32 32 #define bfd_mach_cris_v10_v32 1032 bfd_arch_s390, /* IBM s390 */ #define bfd_mach_s390_31 31 #define bfd_mach_s390_64 64 bfd_arch_openrisc, /* OpenRISC */ bfd_arch_mmix, /* Donald Knuth's educational processor. */ bfd_arch_xstormy16, #define bfd_mach_xstormy16 1 bfd_arch_msp430, /* Texas Instruments MSP430 architecture. 
*/ #define bfd_mach_msp11 11 #define bfd_mach_msp110 110 #define bfd_mach_msp12 12 #define bfd_mach_msp13 13 #define bfd_mach_msp14 14 #define bfd_mach_msp15 15 #define bfd_mach_msp16 16 #define bfd_mach_msp31 31 #define bfd_mach_msp32 32 #define bfd_mach_msp33 33 #define bfd_mach_msp41 41 #define bfd_mach_msp42 42 #define bfd_mach_msp43 43 #define bfd_mach_msp44 44 bfd_arch_xtensa, /* Tensilica's Xtensa cores. */ #define bfd_mach_xtensa 1 bfd_arch_maxq, /* Dallas MAXQ 10/20 */ #define bfd_mach_maxq10 10 #define bfd_mach_maxq20 20 bfd_arch_last }; typedef struct bfd_arch_info { int bits_per_word; int bits_per_address; int bits_per_byte; enum bfd_architecture arch; unsigned long mach; const char *arch_name; const char *printable_name; unsigned int section_align_power; /* TRUE if this is the default machine for the architecture. The default arch should be the first entry for an arch so that all the entries for that arch can be accessed via <>. */ bfd_boolean the_default; const struct bfd_arch_info * (*compatible) (const struct bfd_arch_info *a, const struct bfd_arch_info *b); bfd_boolean (*scan) (const struct bfd_arch_info *, const char *); const struct bfd_arch_info *next; } bfd_arch_info_type; const char *bfd_printable_name (bfd *abfd); const bfd_arch_info_type *bfd_scan_arch (const char *string); const char **bfd_arch_list (void); const bfd_arch_info_type *bfd_arch_get_compatible (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns); void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg); enum bfd_architecture bfd_get_arch (bfd *abfd); unsigned long bfd_get_mach (bfd *abfd); unsigned int bfd_arch_bits_per_byte (bfd *abfd); unsigned int bfd_arch_bits_per_address (bfd *abfd); const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd); const bfd_arch_info_type *bfd_lookup_arch (enum bfd_architecture arch, unsigned long machine); const char *bfd_printable_arch_mach (enum bfd_architecture arch, unsigned long machine); unsigned int bfd_octets_per_byte (bfd *abfd); unsigned int bfd_arch_mach_octets_per_byte (enum bfd_architecture arch, unsigned long machine); /* Extracted from reloc.c. */ typedef enum bfd_reloc_status { /* No errors detected. */ bfd_reloc_ok, /* The relocation was performed, but there was an overflow. */ bfd_reloc_overflow, /* The address to relocate was not within the section supplied. */ bfd_reloc_outofrange, /* Used by special functions. */ bfd_reloc_continue, /* Unsupported relocation size requested. */ bfd_reloc_notsupported, /* Unused. */ bfd_reloc_other, /* The symbol to relocate against was undefined. */ bfd_reloc_undefined, /* The relocation was performed, but may not be ok - presently generated only when linking i960 coff files with i960 b.out symbols. If this type is returned, the error_message argument to bfd_perform_relocation will be set. */ bfd_reloc_dangerous } bfd_reloc_status_type; typedef struct reloc_cache_entry { /* A pointer into the canonical table of pointers. */ struct bfd_symbol **sym_ptr_ptr; /* offset in section. */ bfd_size_type address; /* addend for relocation value. */ bfd_vma addend; /* Pointer to how to perform the required relocation. */ reloc_howto_type *howto; } arelent; enum complain_overflow { /* Do not complain on overflow. */ complain_overflow_dont, /* Complain if the bitfield overflows, whether it is considered as signed or unsigned. */ complain_overflow_bitfield, /* Complain if the value overflows when considered as signed number. 
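An illustrative query of the architecture tables above; abfd is assumed open, and bfd_mach_i386_i386 is the i386 machine value defined earlier in this enum:

      const bfd_arch_info_type *ainfo
        = bfd_lookup_arch (bfd_arch_i386, bfd_mach_i386_i386);
      if (ainfo != NULL)
        printf ("%s: %d bits per address, %d bits per word\n",
                ainfo->printable_name, ainfo->bits_per_address,
                ainfo->bits_per_word);
      printf ("this bfd is %s (mach %lu)\n",
              bfd_printable_name (abfd), bfd_get_mach (abfd));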
*/ complain_overflow_signed, /* Complain if the value overflows when considered as an unsigned number. */ complain_overflow_unsigned }; struct reloc_howto_struct { /* The type field has mainly a documentary use - the back end can do what it wants with it, though normally the back end's external idea of what a reloc number is stored in this field. For example, a PC relative word relocation in a coff environment has the type 023 - because that's what the outside world calls a R_PCRWORD reloc. */ unsigned int type; /* The value the final relocation is shifted right by. This drops unwanted data from the relocation. */ unsigned int rightshift; /* The size of the item to be relocated. This is *not* a power-of-two measure. To get the number of bytes operated on by a type of relocation, use bfd_get_reloc_size. */ int size; /* The number of bits in the item to be relocated. This is used when doing overflow checking. */ unsigned int bitsize; /* Notes that the relocation is relative to the location in the data section of the addend. The relocation function will subtract from the relocation value the address of the location being relocated. */ bfd_boolean pc_relative; /* The bit position of the reloc value in the destination. The relocated value is left shifted by this amount. */ unsigned int bitpos; /* What type of overflow error should be checked for when relocating. */ enum complain_overflow complain_on_overflow; /* If this field is non null, then the supplied function is called rather than the normal function. This allows really strange relocation methods to be accommodated (e.g., i960 callj instructions). */ bfd_reloc_status_type (*special_function) (bfd *, arelent *, struct bfd_symbol *, void *, asection *, bfd *, char **); /* The textual name of the relocation type. */ char *name; /* Some formats record a relocation addend in the section contents rather than with the relocation. For ELF formats this is the distinction between USE_REL and USE_RELA (though the code checks for USE_REL == 1/0). The value of this field is TRUE if the addend is recorded with the section contents; when performing a partial link (ld -r) the section contents (the data) will be modified. The value of this field is FALSE if addends are recorded with the relocation (in arelent.addend); when performing a partial link the relocation will be modified. All relocations for all ELF USE_RELA targets should set this field to FALSE (values of TRUE should be looked on with suspicion). However, the converse is not true: not all relocations of all ELF USE_REL targets set this field to TRUE. Why this is so is peculiar to each particular target. For relocs that aren't used in partial links (e.g. GOT stuff) it doesn't matter what this is set to. */ bfd_boolean partial_inplace; /* src_mask selects the part of the instruction (or data) to be used in the relocation sum. If the target relocations don't have an addend in the reloc, eg. ELF USE_REL, src_mask will normally equal dst_mask to extract the addend from the section contents. If relocations do have an addend in the reloc, eg. ELF USE_RELA, this field should be zero. Non-zero values for ELF USE_RELA targets are bogus as in those cases the value in the dst_mask part of the section contents should be treated as garbage. */ bfd_vma src_mask; /* dst_mask selects which parts of the instruction (or data) are replaced with a relocated value. 
*/ bfd_vma dst_mask; /* When some formats create PC relative instructions, they leave the value of the pc of the place being relocated in the offset slot of the instruction, so that a PC relative relocation can be made just by adding in an ordinary offset (e.g., sun3 a.out). Some formats leave the displacement part of an instruction empty (e.g., m88k bcs); this flag signals the fact. */ bfd_boolean pcrel_offset; }; #define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC } #define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \ NAME, FALSE, 0, 0, IN) #define EMPTY_HOWTO(C) \ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \ NULL, FALSE, 0, 0, FALSE) #define HOWTO_PREPARE(relocation, symbol) \ { \ if (symbol != NULL) \ { \ if (bfd_is_com_section (symbol->section)) \ { \ relocation = 0; \ } \ else \ { \ relocation = symbol->value; \ } \ } \ } unsigned int bfd_get_reloc_size (reloc_howto_type *); typedef struct relent_chain { arelent relent; struct relent_chain *next; } arelent_chain; bfd_reloc_status_type bfd_check_overflow (enum complain_overflow how, unsigned int bitsize, unsigned int rightshift, unsigned int addrsize, bfd_vma relocation); bfd_reloc_status_type bfd_perform_relocation (bfd *abfd, arelent *reloc_entry, void *data, asection *input_section, bfd *output_bfd, char **error_message); bfd_reloc_status_type bfd_install_relocation (bfd *abfd, arelent *reloc_entry, void *data, bfd_vma data_start, asection *input_section, char **error_message); enum bfd_reloc_code_real { _dummy_first_bfd_reloc_code_real, /* Basic absolute relocations of N bits. */ BFD_RELOC_64, BFD_RELOC_32, BFD_RELOC_26, BFD_RELOC_24, BFD_RELOC_16, BFD_RELOC_14, BFD_RELOC_8, /* PC-relative relocations. Sometimes these are relative to the address of the relocation itself; sometimes they are relative to the start of the section containing the relocation. It depends on the specific target. The 24-bit relocation is used in some Intel 960 configurations. */ BFD_RELOC_64_PCREL, BFD_RELOC_32_PCREL, BFD_RELOC_24_PCREL, BFD_RELOC_16_PCREL, BFD_RELOC_12_PCREL, BFD_RELOC_8_PCREL, /* Section relative relocations. Some targets need this for DWARF2. */ BFD_RELOC_32_SECREL, /* For ELF. */ BFD_RELOC_32_GOT_PCREL, BFD_RELOC_16_GOT_PCREL, BFD_RELOC_8_GOT_PCREL, BFD_RELOC_32_GOTOFF, BFD_RELOC_16_GOTOFF, BFD_RELOC_LO16_GOTOFF, BFD_RELOC_HI16_GOTOFF, BFD_RELOC_HI16_S_GOTOFF, BFD_RELOC_8_GOTOFF, BFD_RELOC_64_PLT_PCREL, BFD_RELOC_32_PLT_PCREL, BFD_RELOC_24_PLT_PCREL, BFD_RELOC_16_PLT_PCREL, BFD_RELOC_8_PLT_PCREL, BFD_RELOC_64_PLTOFF, BFD_RELOC_32_PLTOFF, BFD_RELOC_16_PLTOFF, BFD_RELOC_LO16_PLTOFF, BFD_RELOC_HI16_PLTOFF, BFD_RELOC_HI16_S_PLTOFF, BFD_RELOC_8_PLTOFF, /* Relocations used by 68K ELF. */ BFD_RELOC_68K_GLOB_DAT, BFD_RELOC_68K_JMP_SLOT, BFD_RELOC_68K_RELATIVE, /* Linkage-table relative. */ BFD_RELOC_32_BASEREL, BFD_RELOC_16_BASEREL, BFD_RELOC_LO16_BASEREL, BFD_RELOC_HI16_BASEREL, BFD_RELOC_HI16_S_BASEREL, BFD_RELOC_8_BASEREL, BFD_RELOC_RVA, /* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */ BFD_RELOC_8_FFnn, /* These PC-relative relocations are stored as word displacements -- i.e., byte displacements shifted right two bits. The 30-bit word displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the SPARC. (SPARC tools generally refer to this as <>.) 
The signed 16-bit displacement is used on the MIPS, and the 23-bit displacement is used on the Alpha. */ BFD_RELOC_32_PCREL_S2, BFD_RELOC_16_PCREL_S2, BFD_RELOC_23_PCREL_S2, /* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of the target word. These are used on the SPARC. */ BFD_RELOC_HI22, BFD_RELOC_LO10, /* For systems that allocate a Global Pointer register, these are displacements off that register. These relocation types are handled specially, because the value the register will have is decided relatively late. */ BFD_RELOC_GPREL16, BFD_RELOC_GPREL32, /* Reloc types used for i960/b.out. */ BFD_RELOC_I960_CALLJ, /* SPARC ELF relocations. There is probably some overlap with other relocation types already defined. */ BFD_RELOC_NONE, BFD_RELOC_SPARC_WDISP22, BFD_RELOC_SPARC22, BFD_RELOC_SPARC13, BFD_RELOC_SPARC_GOT10, BFD_RELOC_SPARC_GOT13, BFD_RELOC_SPARC_GOT22, BFD_RELOC_SPARC_PC10, BFD_RELOC_SPARC_PC22, BFD_RELOC_SPARC_WPLT30, BFD_RELOC_SPARC_COPY, BFD_RELOC_SPARC_GLOB_DAT, BFD_RELOC_SPARC_JMP_SLOT, BFD_RELOC_SPARC_RELATIVE, BFD_RELOC_SPARC_UA16, BFD_RELOC_SPARC_UA32, BFD_RELOC_SPARC_UA64, /* I think these are specific to SPARC a.out (e.g., Sun 4). */ BFD_RELOC_SPARC_BASE13, BFD_RELOC_SPARC_BASE22, /* SPARC64 relocations */ #define BFD_RELOC_SPARC_64 BFD_RELOC_64 BFD_RELOC_SPARC_10, BFD_RELOC_SPARC_11, BFD_RELOC_SPARC_OLO10, BFD_RELOC_SPARC_HH22, BFD_RELOC_SPARC_HM10, BFD_RELOC_SPARC_LM22, BFD_RELOC_SPARC_PC_HH22, BFD_RELOC_SPARC_PC_HM10, BFD_RELOC_SPARC_PC_LM22, BFD_RELOC_SPARC_WDISP16, BFD_RELOC_SPARC_WDISP19, BFD_RELOC_SPARC_7, BFD_RELOC_SPARC_6, BFD_RELOC_SPARC_5, #define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL BFD_RELOC_SPARC_PLT32, BFD_RELOC_SPARC_PLT64, BFD_RELOC_SPARC_HIX22, BFD_RELOC_SPARC_LOX10, BFD_RELOC_SPARC_H44, BFD_RELOC_SPARC_M44, BFD_RELOC_SPARC_L44, BFD_RELOC_SPARC_REGISTER, /* SPARC little endian relocation */ BFD_RELOC_SPARC_REV32, /* SPARC TLS relocations */ BFD_RELOC_SPARC_TLS_GD_HI22, BFD_RELOC_SPARC_TLS_GD_LO10, BFD_RELOC_SPARC_TLS_GD_ADD, BFD_RELOC_SPARC_TLS_GD_CALL, BFD_RELOC_SPARC_TLS_LDM_HI22, BFD_RELOC_SPARC_TLS_LDM_LO10, BFD_RELOC_SPARC_TLS_LDM_ADD, BFD_RELOC_SPARC_TLS_LDM_CALL, BFD_RELOC_SPARC_TLS_LDO_HIX22, BFD_RELOC_SPARC_TLS_LDO_LOX10, BFD_RELOC_SPARC_TLS_LDO_ADD, BFD_RELOC_SPARC_TLS_IE_HI22, BFD_RELOC_SPARC_TLS_IE_LO10, BFD_RELOC_SPARC_TLS_IE_LD, BFD_RELOC_SPARC_TLS_IE_LDX, BFD_RELOC_SPARC_TLS_IE_ADD, BFD_RELOC_SPARC_TLS_LE_HIX22, BFD_RELOC_SPARC_TLS_LE_LOX10, BFD_RELOC_SPARC_TLS_DTPMOD32, BFD_RELOC_SPARC_TLS_DTPMOD64, BFD_RELOC_SPARC_TLS_DTPOFF32, BFD_RELOC_SPARC_TLS_DTPOFF64, BFD_RELOC_SPARC_TLS_TPOFF32, BFD_RELOC_SPARC_TLS_TPOFF64, /* Alpha ECOFF and ELF relocations. Some of these treat the symbol or "addend" in some special way. For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when writing; when reading, it will be the absolute section symbol. The addend is the displacement in bytes of the "lda" instruction from the "ldah" instruction (which is at the address of this reloc). */ BFD_RELOC_ALPHA_GPDISP_HI16, /* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as with GPDISP_HI16 relocs. The addend is ignored when writing the relocations out, and is filled in with the file's GP value on reading, for convenience. */ BFD_RELOC_ALPHA_GPDISP_LO16, /* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16 relocation except that there is no accompanying GPDISP_LO16 relocation. 
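
   (Aside on the HI22/LO10 pair described a few entries earlier: the two
   relocations split one 32-bit value across two instruction fields, in
   the style of a sethi/or sequence.  A hypothetical sketch of the
   split, where "value" is the full 32-bit quantity:

     unsigned long hi22 = (value >> 10) & 0x3fffff;  // BFD_RELOC_HI22
     unsigned long lo10 = value & 0x3ff;             // BFD_RELOC_LO10

   The consumer simply ORs the low part back in, so no sign adjustment
   is needed, unlike the HI16_S style relocations described later.)
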
*/ BFD_RELOC_ALPHA_GPDISP, /* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference; the assembler turns it into a LDQ instruction to load the address of the symbol, and then fills in a register in the real instruction. The LITERAL reloc, at the LDQ instruction, refers to the .lita section symbol. The addend is ignored when writing, but is filled in with the file's GP value on reading, for convenience, as with the GPDISP_LO16 reloc. The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16. It should refer to the symbol to be referenced, as with 16_GOTOFF, but it generates output not based on the position within the .got section, but relative to the GP value chosen for the file during the final link stage. The LITUSE reloc, on the instruction using the loaded address, gives information to the linker that it might be able to use to optimize away some literal section references. The symbol is ignored (read as the absolute section symbol), and the "addend" indicates the type of instruction using the register: 1 - "memory" fmt insn 2 - byte-manipulation (byte offset reg) 3 - jsr (target of branch) */ BFD_RELOC_ALPHA_LITERAL, BFD_RELOC_ALPHA_ELF_LITERAL, BFD_RELOC_ALPHA_LITUSE, /* The HINT relocation indicates a value that should be filled into the "hint" field of a jmp/jsr/ret instruction, for possible branch- prediction logic which may be provided on some processors. */ BFD_RELOC_ALPHA_HINT, /* The LINKAGE relocation outputs a linkage pair in the object file, which is filled by the linker. */ BFD_RELOC_ALPHA_LINKAGE, /* The CODEADDR relocation outputs a STO_CA in the object file, which is filled by the linker. */ BFD_RELOC_ALPHA_CODEADDR, /* The GPREL_HI/LO relocations together form a 32-bit offset from the GP register. */ BFD_RELOC_ALPHA_GPREL_HI16, BFD_RELOC_ALPHA_GPREL_LO16, /* Like BFD_RELOC_23_PCREL_S2, except that the source and target must share a common GP, and the target address is adjusted for STO_ALPHA_STD_GPLOAD. */ BFD_RELOC_ALPHA_BRSGP, /* Alpha thread-local storage relocations. */ BFD_RELOC_ALPHA_TLSGD, BFD_RELOC_ALPHA_TLSLDM, BFD_RELOC_ALPHA_DTPMOD64, BFD_RELOC_ALPHA_GOTDTPREL16, BFD_RELOC_ALPHA_DTPREL64, BFD_RELOC_ALPHA_DTPREL_HI16, BFD_RELOC_ALPHA_DTPREL_LO16, BFD_RELOC_ALPHA_DTPREL16, BFD_RELOC_ALPHA_GOTTPREL16, BFD_RELOC_ALPHA_TPREL64, BFD_RELOC_ALPHA_TPREL_HI16, BFD_RELOC_ALPHA_TPREL_LO16, BFD_RELOC_ALPHA_TPREL16, /* Bits 27..2 of the relocation address shifted right 2 bits; simple reloc otherwise. */ BFD_RELOC_MIPS_JMP, /* The MIPS16 jump instruction. */ BFD_RELOC_MIPS16_JMP, /* MIPS16 GP relative reloc. */ BFD_RELOC_MIPS16_GPREL, /* High 16 bits of 32-bit value; simple reloc. */ BFD_RELOC_HI16, /* High 16 bits of 32-bit value but the low 16 bits will be sign extended and added to form the final result. If the low 16 bits form a negative number, we need to add one to the high value to compensate for the borrow when the low bits are added. */ BFD_RELOC_HI16_S, /* Low 16 bits. */ BFD_RELOC_LO16, /* High 16 bits of 32-bit pc-relative value */ BFD_RELOC_HI16_PCREL, /* High 16 bits of 32-bit pc-relative value, adjusted */ BFD_RELOC_HI16_S_PCREL, /* Low 16 bits of pc-relative value */ BFD_RELOC_LO16_PCREL, /* MIPS16 high 16 bits of 32-bit value. */ BFD_RELOC_MIPS16_HI16, /* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign extended and added to form the final result. If the low 16 bits form a negative number, we need to add one to the high value to compensate for the borrow when the low bits are added. 
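
   Concretely, the adjusted split can be computed as in this sketch (the
   variable names are hypothetical; "value" is the full 32-bit quantity
   being split):

     unsigned long lo16 = value & 0xffff;
     unsigned long hi16 = ((value >> 16) + ((lo16 & 0x8000) ? 1 : 0)) & 0xffff;

   The consumer forms (hi16 << 16) plus the sign-extended lo16, so when
   lo16 has its top bit set the extra 1 added above cancels the borrow.
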
*/ BFD_RELOC_MIPS16_HI16_S, /* MIPS16 low 16 bits. */ BFD_RELOC_MIPS16_LO16, /* Relocation against a MIPS literal section. */ BFD_RELOC_MIPS_LITERAL, /* MIPS ELF relocations. */ BFD_RELOC_MIPS_GOT16, BFD_RELOC_MIPS_CALL16, BFD_RELOC_MIPS_GOT_HI16, BFD_RELOC_MIPS_GOT_LO16, BFD_RELOC_MIPS_CALL_HI16, BFD_RELOC_MIPS_CALL_LO16, BFD_RELOC_MIPS_SUB, BFD_RELOC_MIPS_GOT_PAGE, BFD_RELOC_MIPS_GOT_OFST, BFD_RELOC_MIPS_GOT_DISP, BFD_RELOC_MIPS_SHIFT5, BFD_RELOC_MIPS_SHIFT6, BFD_RELOC_MIPS_INSERT_A, BFD_RELOC_MIPS_INSERT_B, BFD_RELOC_MIPS_DELETE, BFD_RELOC_MIPS_HIGHEST, BFD_RELOC_MIPS_HIGHER, BFD_RELOC_MIPS_SCN_DISP, BFD_RELOC_MIPS_REL16, BFD_RELOC_MIPS_RELGOT, BFD_RELOC_MIPS_JALR, BFD_RELOC_MIPS_TLS_DTPMOD32, BFD_RELOC_MIPS_TLS_DTPREL32, BFD_RELOC_MIPS_TLS_DTPMOD64, BFD_RELOC_MIPS_TLS_DTPREL64, BFD_RELOC_MIPS_TLS_GD, BFD_RELOC_MIPS_TLS_LDM, BFD_RELOC_MIPS_TLS_DTPREL_HI16, BFD_RELOC_MIPS_TLS_DTPREL_LO16, BFD_RELOC_MIPS_TLS_GOTTPREL, BFD_RELOC_MIPS_TLS_TPREL32, BFD_RELOC_MIPS_TLS_TPREL64, BFD_RELOC_MIPS_TLS_TPREL_HI16, BFD_RELOC_MIPS_TLS_TPREL_LO16, /* Fujitsu Frv Relocations. */ BFD_RELOC_FRV_LABEL16, BFD_RELOC_FRV_LABEL24, BFD_RELOC_FRV_LO16, BFD_RELOC_FRV_HI16, BFD_RELOC_FRV_GPREL12, BFD_RELOC_FRV_GPRELU12, BFD_RELOC_FRV_GPREL32, BFD_RELOC_FRV_GPRELHI, BFD_RELOC_FRV_GPRELLO, BFD_RELOC_FRV_GOT12, BFD_RELOC_FRV_GOTHI, BFD_RELOC_FRV_GOTLO, BFD_RELOC_FRV_FUNCDESC, BFD_RELOC_FRV_FUNCDESC_GOT12, BFD_RELOC_FRV_FUNCDESC_GOTHI, BFD_RELOC_FRV_FUNCDESC_GOTLO, BFD_RELOC_FRV_FUNCDESC_VALUE, BFD_RELOC_FRV_FUNCDESC_GOTOFF12, BFD_RELOC_FRV_FUNCDESC_GOTOFFHI, BFD_RELOC_FRV_FUNCDESC_GOTOFFLO, BFD_RELOC_FRV_GOTOFF12, BFD_RELOC_FRV_GOTOFFHI, BFD_RELOC_FRV_GOTOFFLO, BFD_RELOC_FRV_GETTLSOFF, BFD_RELOC_FRV_TLSDESC_VALUE, BFD_RELOC_FRV_GOTTLSDESC12, BFD_RELOC_FRV_GOTTLSDESCHI, BFD_RELOC_FRV_GOTTLSDESCLO, BFD_RELOC_FRV_TLSMOFF12, BFD_RELOC_FRV_TLSMOFFHI, BFD_RELOC_FRV_TLSMOFFLO, BFD_RELOC_FRV_GOTTLSOFF12, BFD_RELOC_FRV_GOTTLSOFFHI, BFD_RELOC_FRV_GOTTLSOFFLO, BFD_RELOC_FRV_TLSOFF, BFD_RELOC_FRV_TLSDESC_RELAX, BFD_RELOC_FRV_GETTLSOFF_RELAX, BFD_RELOC_FRV_TLSOFF_RELAX, BFD_RELOC_FRV_TLSMOFF, /* This is a 24bit GOT-relative reloc for the mn10300. */ BFD_RELOC_MN10300_GOTOFF24, /* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT32, /* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT24, /* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_GOT16, /* Copy symbol at runtime. */ BFD_RELOC_MN10300_COPY, /* Create GOT entry. */ BFD_RELOC_MN10300_GLOB_DAT, /* Create PLT entry. */ BFD_RELOC_MN10300_JMP_SLOT, /* Adjust by program base. 
*/ BFD_RELOC_MN10300_RELATIVE, /* i386/elf relocations */ BFD_RELOC_386_GOT32, BFD_RELOC_386_PLT32, BFD_RELOC_386_COPY, BFD_RELOC_386_GLOB_DAT, BFD_RELOC_386_JUMP_SLOT, BFD_RELOC_386_RELATIVE, BFD_RELOC_386_GOTOFF, BFD_RELOC_386_GOTPC, BFD_RELOC_386_TLS_TPOFF, BFD_RELOC_386_TLS_IE, BFD_RELOC_386_TLS_GOTIE, BFD_RELOC_386_TLS_LE, BFD_RELOC_386_TLS_GD, BFD_RELOC_386_TLS_LDM, BFD_RELOC_386_TLS_LDO_32, BFD_RELOC_386_TLS_IE_32, BFD_RELOC_386_TLS_LE_32, BFD_RELOC_386_TLS_DTPMOD32, BFD_RELOC_386_TLS_DTPOFF32, BFD_RELOC_386_TLS_TPOFF32, /* x86-64/elf relocations */ BFD_RELOC_X86_64_GOT32, BFD_RELOC_X86_64_PLT32, BFD_RELOC_X86_64_COPY, BFD_RELOC_X86_64_GLOB_DAT, BFD_RELOC_X86_64_JUMP_SLOT, BFD_RELOC_X86_64_RELATIVE, BFD_RELOC_X86_64_GOTPCREL, BFD_RELOC_X86_64_32S, BFD_RELOC_X86_64_DTPMOD64, BFD_RELOC_X86_64_DTPOFF64, BFD_RELOC_X86_64_TPOFF64, BFD_RELOC_X86_64_TLSGD, BFD_RELOC_X86_64_TLSLD, BFD_RELOC_X86_64_DTPOFF32, BFD_RELOC_X86_64_GOTTPOFF, BFD_RELOC_X86_64_TPOFF32, BFD_RELOC_X86_64_GOTOFF64, BFD_RELOC_X86_64_GOTPC32, /* ns32k relocations */ BFD_RELOC_NS32K_IMM_8, BFD_RELOC_NS32K_IMM_16, BFD_RELOC_NS32K_IMM_32, BFD_RELOC_NS32K_IMM_8_PCREL, BFD_RELOC_NS32K_IMM_16_PCREL, BFD_RELOC_NS32K_IMM_32_PCREL, BFD_RELOC_NS32K_DISP_8, BFD_RELOC_NS32K_DISP_16, BFD_RELOC_NS32K_DISP_32, BFD_RELOC_NS32K_DISP_8_PCREL, BFD_RELOC_NS32K_DISP_16_PCREL, BFD_RELOC_NS32K_DISP_32_PCREL, /* PDP11 relocations */ BFD_RELOC_PDP11_DISP_8_PCREL, BFD_RELOC_PDP11_DISP_6_PCREL, /* Picojava relocs. Not all of these appear in object files. */ BFD_RELOC_PJ_CODE_HI16, BFD_RELOC_PJ_CODE_LO16, BFD_RELOC_PJ_CODE_DIR16, BFD_RELOC_PJ_CODE_DIR32, BFD_RELOC_PJ_CODE_REL16, BFD_RELOC_PJ_CODE_REL32, /* Power(rs6000) and PowerPC relocations. */ BFD_RELOC_PPC_B26, BFD_RELOC_PPC_BA26, BFD_RELOC_PPC_TOC16, BFD_RELOC_PPC_B16, BFD_RELOC_PPC_B16_BRTAKEN, BFD_RELOC_PPC_B16_BRNTAKEN, BFD_RELOC_PPC_BA16, BFD_RELOC_PPC_BA16_BRTAKEN, BFD_RELOC_PPC_BA16_BRNTAKEN, BFD_RELOC_PPC_COPY, BFD_RELOC_PPC_GLOB_DAT, BFD_RELOC_PPC_JMP_SLOT, BFD_RELOC_PPC_RELATIVE, BFD_RELOC_PPC_LOCAL24PC, BFD_RELOC_PPC_EMB_NADDR32, BFD_RELOC_PPC_EMB_NADDR16, BFD_RELOC_PPC_EMB_NADDR16_LO, BFD_RELOC_PPC_EMB_NADDR16_HI, BFD_RELOC_PPC_EMB_NADDR16_HA, BFD_RELOC_PPC_EMB_SDAI16, BFD_RELOC_PPC_EMB_SDA2I16, BFD_RELOC_PPC_EMB_SDA2REL, BFD_RELOC_PPC_EMB_SDA21, BFD_RELOC_PPC_EMB_MRKREF, BFD_RELOC_PPC_EMB_RELSEC16, BFD_RELOC_PPC_EMB_RELST_LO, BFD_RELOC_PPC_EMB_RELST_HI, BFD_RELOC_PPC_EMB_RELST_HA, BFD_RELOC_PPC_EMB_BIT_FLD, BFD_RELOC_PPC_EMB_RELSDA, BFD_RELOC_PPC64_HIGHER, BFD_RELOC_PPC64_HIGHER_S, BFD_RELOC_PPC64_HIGHEST, BFD_RELOC_PPC64_HIGHEST_S, BFD_RELOC_PPC64_TOC16_LO, BFD_RELOC_PPC64_TOC16_HI, BFD_RELOC_PPC64_TOC16_HA, BFD_RELOC_PPC64_TOC, BFD_RELOC_PPC64_PLTGOT16, BFD_RELOC_PPC64_PLTGOT16_LO, BFD_RELOC_PPC64_PLTGOT16_HI, BFD_RELOC_PPC64_PLTGOT16_HA, BFD_RELOC_PPC64_ADDR16_DS, BFD_RELOC_PPC64_ADDR16_LO_DS, BFD_RELOC_PPC64_GOT16_DS, BFD_RELOC_PPC64_GOT16_LO_DS, BFD_RELOC_PPC64_PLT16_LO_DS, BFD_RELOC_PPC64_SECTOFF_DS, BFD_RELOC_PPC64_SECTOFF_LO_DS, BFD_RELOC_PPC64_TOC16_DS, BFD_RELOC_PPC64_TOC16_LO_DS, BFD_RELOC_PPC64_PLTGOT16_DS, BFD_RELOC_PPC64_PLTGOT16_LO_DS, /* PowerPC and PowerPC64 thread-local storage relocations. 
*/ BFD_RELOC_PPC_TLS, BFD_RELOC_PPC_DTPMOD, BFD_RELOC_PPC_TPREL16, BFD_RELOC_PPC_TPREL16_LO, BFD_RELOC_PPC_TPREL16_HI, BFD_RELOC_PPC_TPREL16_HA, BFD_RELOC_PPC_TPREL, BFD_RELOC_PPC_DTPREL16, BFD_RELOC_PPC_DTPREL16_LO, BFD_RELOC_PPC_DTPREL16_HI, BFD_RELOC_PPC_DTPREL16_HA, BFD_RELOC_PPC_DTPREL, BFD_RELOC_PPC_GOT_TLSGD16, BFD_RELOC_PPC_GOT_TLSGD16_LO, BFD_RELOC_PPC_GOT_TLSGD16_HI, BFD_RELOC_PPC_GOT_TLSGD16_HA, BFD_RELOC_PPC_GOT_TLSLD16, BFD_RELOC_PPC_GOT_TLSLD16_LO, BFD_RELOC_PPC_GOT_TLSLD16_HI, BFD_RELOC_PPC_GOT_TLSLD16_HA, BFD_RELOC_PPC_GOT_TPREL16, BFD_RELOC_PPC_GOT_TPREL16_LO, BFD_RELOC_PPC_GOT_TPREL16_HI, BFD_RELOC_PPC_GOT_TPREL16_HA, BFD_RELOC_PPC_GOT_DTPREL16, BFD_RELOC_PPC_GOT_DTPREL16_LO, BFD_RELOC_PPC_GOT_DTPREL16_HI, BFD_RELOC_PPC_GOT_DTPREL16_HA, BFD_RELOC_PPC64_TPREL16_DS, BFD_RELOC_PPC64_TPREL16_LO_DS, BFD_RELOC_PPC64_TPREL16_HIGHER, BFD_RELOC_PPC64_TPREL16_HIGHERA, BFD_RELOC_PPC64_TPREL16_HIGHEST, BFD_RELOC_PPC64_TPREL16_HIGHESTA, BFD_RELOC_PPC64_DTPREL16_DS, BFD_RELOC_PPC64_DTPREL16_LO_DS, BFD_RELOC_PPC64_DTPREL16_HIGHER, BFD_RELOC_PPC64_DTPREL16_HIGHERA, BFD_RELOC_PPC64_DTPREL16_HIGHEST, BFD_RELOC_PPC64_DTPREL16_HIGHESTA, /* IBM 370/390 relocations */ BFD_RELOC_I370_D12, /* The type of reloc used to build a constructor table - at the moment probably a 32 bit wide absolute relocation, but the target can choose. It generally does map to one of the other relocation types. */ BFD_RELOC_CTOR, /* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are not stored in the instruction. */ BFD_RELOC_ARM_PCREL_BRANCH, /* ARM 26 bit pc-relative branch. The lowest bit must be zero and is not stored in the instruction. The 2nd lowest bit comes from a 1 bit field in the instruction. */ BFD_RELOC_ARM_PCREL_BLX, /* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is not stored in the instruction. The 2nd lowest bit comes from a 1 bit field in the instruction. */ BFD_RELOC_THUMB_PCREL_BLX, /* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches. The lowest bit must be zero and is not stored in the instruction. Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an "nn" one smaller in all cases. Note further that BRANCH23 corresponds to R_ARM_THM_CALL. */ BFD_RELOC_THUMB_PCREL_BRANCH7, BFD_RELOC_THUMB_PCREL_BRANCH9, BFD_RELOC_THUMB_PCREL_BRANCH12, BFD_RELOC_THUMB_PCREL_BRANCH20, BFD_RELOC_THUMB_PCREL_BRANCH23, BFD_RELOC_THUMB_PCREL_BRANCH25, /* 12-bit immediate offset, used in ARM-format ldr and str instructions. */ BFD_RELOC_ARM_OFFSET_IMM, /* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */ BFD_RELOC_ARM_THUMB_OFFSET, /* Pc-relative or absolute relocation depending on target. Used for entries in .init_array sections. */ BFD_RELOC_ARM_TARGET1, /* Read-only segment base relative address. */ BFD_RELOC_ARM_ROSEGREL32, /* Data segment base relative address. */ BFD_RELOC_ARM_SBREL32, /* This reloc is used for references to RTTI data from exception handling tables. The actual definition depends on the target. It may be a pc-relative or some form of GOT-indirect relocation. */ BFD_RELOC_ARM_TARGET2, /* 31-bit PC relative address. */ BFD_RELOC_ARM_PREL31, /* Relocations for setting up GOTs and PLTs for shared libraries. */ BFD_RELOC_ARM_JUMP_SLOT, BFD_RELOC_ARM_GLOB_DAT, BFD_RELOC_ARM_GOT32, BFD_RELOC_ARM_PLT32, BFD_RELOC_ARM_RELATIVE, BFD_RELOC_ARM_GOTOFF, BFD_RELOC_ARM_GOTPC, /* ARM thread-local storage relocations. 
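
   (Aside, referring back to BFD_RELOC_ARM_PCREL_BRANCH above: because
   the two low bits of the 26-bit displacement are implied, the stored
   field is a signed word offset.  A hedged sketch of the usual
   encoding, assuming the conventional ARM rule that the visible PC is
   the instruction address plus 8; "insn", "target" and "pc" are
   hypothetical:

     insn = (insn & 0xff000000)
            | (((target - (pc + 8)) >> 2) & 0x00ffffff);

   Back ends differ in where that bias is accounted for, so treat this
   only as an illustration of the shifted, truncated field.)
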
*/ BFD_RELOC_ARM_TLS_GD32, BFD_RELOC_ARM_TLS_LDO32, BFD_RELOC_ARM_TLS_LDM32, BFD_RELOC_ARM_TLS_DTPOFF32, BFD_RELOC_ARM_TLS_DTPMOD32, BFD_RELOC_ARM_TLS_TPOFF32, BFD_RELOC_ARM_TLS_IE32, BFD_RELOC_ARM_TLS_LE32, /* These relocs are only used within the ARM assembler. They are not (at present) written to any object files. */ BFD_RELOC_ARM_IMMEDIATE, BFD_RELOC_ARM_ADRL_IMMEDIATE, BFD_RELOC_ARM_T32_IMMEDIATE, BFD_RELOC_ARM_SHIFT_IMM, BFD_RELOC_ARM_SMI, BFD_RELOC_ARM_SWI, BFD_RELOC_ARM_MULTI, BFD_RELOC_ARM_CP_OFF_IMM, BFD_RELOC_ARM_CP_OFF_IMM_S2, BFD_RELOC_ARM_ADR_IMM, BFD_RELOC_ARM_LDR_IMM, BFD_RELOC_ARM_LITERAL, BFD_RELOC_ARM_IN_POOL, BFD_RELOC_ARM_OFFSET_IMM8, BFD_RELOC_ARM_T32_OFFSET_U8, BFD_RELOC_ARM_T32_OFFSET_IMM, BFD_RELOC_ARM_HWLITERAL, BFD_RELOC_ARM_THUMB_ADD, BFD_RELOC_ARM_THUMB_IMM, BFD_RELOC_ARM_THUMB_SHIFT, /* Renesas / SuperH SH relocs. Not all of these appear in object files. */ BFD_RELOC_SH_PCDISP8BY2, BFD_RELOC_SH_PCDISP12BY2, BFD_RELOC_SH_IMM3, BFD_RELOC_SH_IMM3U, BFD_RELOC_SH_DISP12, BFD_RELOC_SH_DISP12BY2, BFD_RELOC_SH_DISP12BY4, BFD_RELOC_SH_DISP12BY8, BFD_RELOC_SH_DISP20, BFD_RELOC_SH_DISP20BY8, BFD_RELOC_SH_IMM4, BFD_RELOC_SH_IMM4BY2, BFD_RELOC_SH_IMM4BY4, BFD_RELOC_SH_IMM8, BFD_RELOC_SH_IMM8BY2, BFD_RELOC_SH_IMM8BY4, BFD_RELOC_SH_PCRELIMM8BY2, BFD_RELOC_SH_PCRELIMM8BY4, BFD_RELOC_SH_SWITCH16, BFD_RELOC_SH_SWITCH32, BFD_RELOC_SH_USES, BFD_RELOC_SH_COUNT, BFD_RELOC_SH_ALIGN, BFD_RELOC_SH_CODE, BFD_RELOC_SH_DATA, BFD_RELOC_SH_LABEL, BFD_RELOC_SH_LOOP_START, BFD_RELOC_SH_LOOP_END, BFD_RELOC_SH_COPY, BFD_RELOC_SH_GLOB_DAT, BFD_RELOC_SH_JMP_SLOT, BFD_RELOC_SH_RELATIVE, BFD_RELOC_SH_GOTPC, BFD_RELOC_SH_GOT_LOW16, BFD_RELOC_SH_GOT_MEDLOW16, BFD_RELOC_SH_GOT_MEDHI16, BFD_RELOC_SH_GOT_HI16, BFD_RELOC_SH_GOTPLT_LOW16, BFD_RELOC_SH_GOTPLT_MEDLOW16, BFD_RELOC_SH_GOTPLT_MEDHI16, BFD_RELOC_SH_GOTPLT_HI16, BFD_RELOC_SH_PLT_LOW16, BFD_RELOC_SH_PLT_MEDLOW16, BFD_RELOC_SH_PLT_MEDHI16, BFD_RELOC_SH_PLT_HI16, BFD_RELOC_SH_GOTOFF_LOW16, BFD_RELOC_SH_GOTOFF_MEDLOW16, BFD_RELOC_SH_GOTOFF_MEDHI16, BFD_RELOC_SH_GOTOFF_HI16, BFD_RELOC_SH_GOTPC_LOW16, BFD_RELOC_SH_GOTPC_MEDLOW16, BFD_RELOC_SH_GOTPC_MEDHI16, BFD_RELOC_SH_GOTPC_HI16, BFD_RELOC_SH_COPY64, BFD_RELOC_SH_GLOB_DAT64, BFD_RELOC_SH_JMP_SLOT64, BFD_RELOC_SH_RELATIVE64, BFD_RELOC_SH_GOT10BY4, BFD_RELOC_SH_GOT10BY8, BFD_RELOC_SH_GOTPLT10BY4, BFD_RELOC_SH_GOTPLT10BY8, BFD_RELOC_SH_GOTPLT32, BFD_RELOC_SH_SHMEDIA_CODE, BFD_RELOC_SH_IMMU5, BFD_RELOC_SH_IMMS6, BFD_RELOC_SH_IMMS6BY32, BFD_RELOC_SH_IMMU6, BFD_RELOC_SH_IMMS10, BFD_RELOC_SH_IMMS10BY2, BFD_RELOC_SH_IMMS10BY4, BFD_RELOC_SH_IMMS10BY8, BFD_RELOC_SH_IMMS16, BFD_RELOC_SH_IMMU16, BFD_RELOC_SH_IMM_LOW16, BFD_RELOC_SH_IMM_LOW16_PCREL, BFD_RELOC_SH_IMM_MEDLOW16, BFD_RELOC_SH_IMM_MEDLOW16_PCREL, BFD_RELOC_SH_IMM_MEDHI16, BFD_RELOC_SH_IMM_MEDHI16_PCREL, BFD_RELOC_SH_IMM_HI16, BFD_RELOC_SH_IMM_HI16_PCREL, BFD_RELOC_SH_PT_16, BFD_RELOC_SH_TLS_GD_32, BFD_RELOC_SH_TLS_LD_32, BFD_RELOC_SH_TLS_LDO_32, BFD_RELOC_SH_TLS_IE_32, BFD_RELOC_SH_TLS_LE_32, BFD_RELOC_SH_TLS_DTPMOD32, BFD_RELOC_SH_TLS_DTPOFF32, BFD_RELOC_SH_TLS_TPOFF32, /* ARC Cores relocs. ARC 22 bit pc-relative branch. The lowest two bits must be zero and are not stored in the instruction. The high 20 bits are installed in bits 26 through 7 of the instruction. */ BFD_RELOC_ARC_B22_PCREL, /* ARC 26 bit absolute branch. The lowest two bits must be zero and are not stored in the instruction. The high 24 bits are installed in bits 23 through 0. */ BFD_RELOC_ARC_B26, /* Mitsubishi D10V relocs. 
This is a 10-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_10_PCREL_R, /* Mitsubishi D10V relocs. This is a 10-bit reloc with the right 2 bits assumed to be 0. This is the same as the previous reloc except it is in the left container, i.e., shifted left 15 bits. */ BFD_RELOC_D10V_10_PCREL_L, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_18, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_D10V_18_PCREL, /* Mitsubishi D30V relocs. This is a 6-bit absolute reloc. */ BFD_RELOC_D30V_6, /* This is a 6-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_9_PCREL, /* This is a 6-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_9_PCREL_R, /* This is a 12-bit absolute reloc with the right 3 bitsassumed to be 0. */ BFD_RELOC_D30V_15, /* This is a 12-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_15_PCREL, /* This is a 12-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_15_PCREL_R, /* This is an 18-bit absolute reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_21, /* This is an 18-bit pc-relative reloc with the right 3 bits assumed to be 0. */ BFD_RELOC_D30V_21_PCREL, /* This is an 18-bit pc-relative reloc with the right 3 bits assumed to be 0. Same as the previous reloc but on the right side of the container. */ BFD_RELOC_D30V_21_PCREL_R, /* This is a 32-bit absolute reloc. */ BFD_RELOC_D30V_32, /* This is a 32-bit pc-relative reloc. */ BFD_RELOC_D30V_32_PCREL, /* DLX relocs */ BFD_RELOC_DLX_HI16_S, /* DLX relocs */ BFD_RELOC_DLX_LO16, /* DLX relocs */ BFD_RELOC_DLX_JMP26, /* Renesas M16C/M32C Relocations. */ BFD_RELOC_M16C_8_PCREL8, BFD_RELOC_M16C_16_PCREL8, BFD_RELOC_M16C_8_PCREL16, BFD_RELOC_M16C_8_ELABEL24, BFD_RELOC_M16C_8_ABS16, BFD_RELOC_M16C_16_ABS16, BFD_RELOC_M16C_16_ABS24, BFD_RELOC_M16C_16_ABS32, BFD_RELOC_M16C_24_ABS16, BFD_RELOC_M16C_24_ABS24, BFD_RELOC_M16C_24_ABS32, BFD_RELOC_M16C_32_ABS16, BFD_RELOC_M16C_32_ABS24, BFD_RELOC_M16C_32_ABS32, BFD_RELOC_M16C_40_ABS16, BFD_RELOC_M16C_40_ABS24, BFD_RELOC_M16C_40_ABS32, /* Renesas M32R (formerly Mitsubishi M32R) relocs. This is a 24 bit absolute address. */ BFD_RELOC_M32R_24, /* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_10_PCREL, /* This is an 18-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_18_PCREL, /* This is a 26-bit reloc with the right 2 bits assumed to be 0. */ BFD_RELOC_M32R_26_PCREL, /* This is a 16-bit reloc containing the high 16 bits of an address used when the lower 16 bits are treated as unsigned. */ BFD_RELOC_M32R_HI16_ULO, /* This is a 16-bit reloc containing the high 16 bits of an address used when the lower 16 bits are treated as signed. */ BFD_RELOC_M32R_HI16_SLO, /* This is a 16-bit reloc containing the lower 16 bits of an address. */ BFD_RELOC_M32R_LO16, /* This is a 16-bit reloc containing the small data area offset for use in add3, load, and store instructions. */ BFD_RELOC_M32R_SDA16, /* For PIC. 
*/ BFD_RELOC_M32R_GOT24, BFD_RELOC_M32R_26_PLTREL, BFD_RELOC_M32R_COPY, BFD_RELOC_M32R_GLOB_DAT, BFD_RELOC_M32R_JMP_SLOT, BFD_RELOC_M32R_RELATIVE, BFD_RELOC_M32R_GOTOFF, BFD_RELOC_M32R_GOTOFF_HI_ULO, BFD_RELOC_M32R_GOTOFF_HI_SLO, BFD_RELOC_M32R_GOTOFF_LO, BFD_RELOC_M32R_GOTPC24, BFD_RELOC_M32R_GOT16_HI_ULO, BFD_RELOC_M32R_GOT16_HI_SLO, BFD_RELOC_M32R_GOT16_LO, BFD_RELOC_M32R_GOTPC_HI_ULO, BFD_RELOC_M32R_GOTPC_HI_SLO, BFD_RELOC_M32R_GOTPC_LO, /* This is a 9-bit reloc */ BFD_RELOC_V850_9_PCREL, /* This is a 22-bit reloc */ BFD_RELOC_V850_22_PCREL, /* This is a 16 bit offset from the short data area pointer. */ BFD_RELOC_V850_SDA_16_16_OFFSET, /* This is a 16 bit offset (of which only 15 bits are used) from the short data area pointer. */ BFD_RELOC_V850_SDA_15_16_OFFSET, /* This is a 16 bit offset from the zero data area pointer. */ BFD_RELOC_V850_ZDA_16_16_OFFSET, /* This is a 16 bit offset (of which only 15 bits are used) from the zero data area pointer. */ BFD_RELOC_V850_ZDA_15_16_OFFSET, /* This is an 8 bit offset (of which only 6 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_6_8_OFFSET, /* This is an 8bit offset (of which only 7 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_7_8_OFFSET, /* This is a 7 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_7_7_OFFSET, /* This is a 16 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_16_16_OFFSET, /* This is a 5 bit offset (of which only 4 bits are used) from the tiny data area pointer. */ BFD_RELOC_V850_TDA_4_5_OFFSET, /* This is a 4 bit offset from the tiny data area pointer. */ BFD_RELOC_V850_TDA_4_4_OFFSET, /* This is a 16 bit offset from the short data area pointer, with the bits placed non-contiguously in the instruction. */ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET, /* This is a 16 bit offset from the zero data area pointer, with the bits placed non-contiguously in the instruction. */ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET, /* This is a 6 bit offset from the call table base pointer. */ BFD_RELOC_V850_CALLT_6_7_OFFSET, /* This is a 16 bit offset from the call table base pointer. */ BFD_RELOC_V850_CALLT_16_16_OFFSET, /* Used for relaxing indirect function calls. */ BFD_RELOC_V850_LONGCALL, /* Used for relaxing indirect jumps. */ BFD_RELOC_V850_LONGJUMP, /* Used to maintain alignment whilst relaxing. */ BFD_RELOC_V850_ALIGN, /* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu instructions. */ BFD_RELOC_V850_LO16_SPLIT_OFFSET, /* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_32_PCREL, /* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the instruction. */ BFD_RELOC_MN10300_16_PCREL, /* This is a 8bit DP reloc for the tms320c30, where the most significant 8 bits of a 24 bit word are placed into the least significant 8 bits of the opcode. */ BFD_RELOC_TIC30_LDP, /* This is a 7bit reloc for the tms320c54x, where the least significant 7 bits of a 16 bit word are placed into the least significant 7 bits of the opcode. */ BFD_RELOC_TIC54X_PARTLS7, /* This is a 9bit DP reloc for the tms320c54x, where the most significant 9 bits of a 16 bit word are placed into the least significant 9 bits of the opcode. */ BFD_RELOC_TIC54X_PARTMS9, /* This is an extended address 23-bit reloc for the tms320c54x. */ BFD_RELOC_TIC54X_23, /* This is a 16-bit reloc for the tms320c54x, where the least significant 16 bits of a 23-bit extended address are placed into the opcode. 
*/ BFD_RELOC_TIC54X_16_OF_23, /* This is a reloc for the tms320c54x, where the most significant 7 bits of a 23-bit extended address are placed into the opcode. */ BFD_RELOC_TIC54X_MS7_OF_23, /* This is a 48 bit reloc for the FR30 that stores 32 bits. */ BFD_RELOC_FR30_48, /* This is a 32 bit reloc for the FR30 that stores 20 bits split up into two sections. */ BFD_RELOC_FR30_20, /* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in 4 bits. */ BFD_RELOC_FR30_6_IN_4, /* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset into 8 bits. */ BFD_RELOC_FR30_8_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset into 8 bits. */ BFD_RELOC_FR30_9_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset into 8 bits. */ BFD_RELOC_FR30_10_IN_8, /* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative short offset into 8 bits. */ BFD_RELOC_FR30_9_PCREL, /* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative short offset into 11 bits. */ BFD_RELOC_FR30_12_PCREL, /* Motorola Mcore relocations. */ BFD_RELOC_MCORE_PCREL_IMM8BY4, BFD_RELOC_MCORE_PCREL_IMM11BY2, BFD_RELOC_MCORE_PCREL_IMM4BY2, BFD_RELOC_MCORE_PCREL_32, BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2, BFD_RELOC_MCORE_RVA, /* These are relocations for the GETA instruction. */ BFD_RELOC_MMIX_GETA, BFD_RELOC_MMIX_GETA_1, BFD_RELOC_MMIX_GETA_2, BFD_RELOC_MMIX_GETA_3, /* These are relocations for a conditional branch instruction. */ BFD_RELOC_MMIX_CBRANCH, BFD_RELOC_MMIX_CBRANCH_J, BFD_RELOC_MMIX_CBRANCH_1, BFD_RELOC_MMIX_CBRANCH_2, BFD_RELOC_MMIX_CBRANCH_3, /* These are relocations for the PUSHJ instruction. */ BFD_RELOC_MMIX_PUSHJ, BFD_RELOC_MMIX_PUSHJ_1, BFD_RELOC_MMIX_PUSHJ_2, BFD_RELOC_MMIX_PUSHJ_3, BFD_RELOC_MMIX_PUSHJ_STUBBABLE, /* These are relocations for the JMP instruction. */ BFD_RELOC_MMIX_JMP, BFD_RELOC_MMIX_JMP_1, BFD_RELOC_MMIX_JMP_2, BFD_RELOC_MMIX_JMP_3, /* This is a relocation for a relative address as in a GETA instruction or a branch. */ BFD_RELOC_MMIX_ADDR19, /* This is a relocation for a relative address as in a JMP instruction. */ BFD_RELOC_MMIX_ADDR27, /* This is a relocation for an instruction field that may be a general register or a value 0..255. */ BFD_RELOC_MMIX_REG_OR_BYTE, /* This is a relocation for an instruction field that may be a general register. */ BFD_RELOC_MMIX_REG, /* This is a relocation for two instruction fields holding a register and an offset, the equivalent of the relocation. */ BFD_RELOC_MMIX_BASE_PLUS_OFFSET, /* This relocation is an assertion that the expression is not allocated as a global register. It does not modify contents. */ BFD_RELOC_MMIX_LOCAL, /* This is a 16 bit reloc for the AVR that stores 8 bit pc relative short offset into 7 bits. */ BFD_RELOC_AVR_7_PCREL, /* This is a 16 bit reloc for the AVR that stores 13 bit pc relative short offset into 12 bits. */ BFD_RELOC_AVR_13_PCREL, /* This is a 16 bit reloc for the AVR that stores 17 bit value (usually program memory address) into 16 bits. */ BFD_RELOC_AVR_16_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (usually data memory address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_LO8_LDI, /* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit of data memory address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HI8_LDI, /* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit of program memory address) into 8 bit immediate value of LDI insn. 
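
   Taken together, the LO8/HI8/HH8 LDI relocations let a 24-bit address
   be loaded with three LDI instructions, one byte at a time.  A small
   illustrative sketch of the split (the variable names are hypothetical
   and the address is used as-is, ignoring the negated and program
   memory variants):

     unsigned char lo8 = addr & 0xff;          // BFD_RELOC_AVR_LO8_LDI
     unsigned char hi8 = (addr >> 8) & 0xff;   // BFD_RELOC_AVR_HI8_LDI
     unsigned char hh8 = (addr >> 16) & 0xff;  // BFD_RELOC_AVR_HH8_LDI
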
*/ BFD_RELOC_AVR_HH8_LDI, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (usually data memory address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_LO8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 8 bit of data memory address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HI8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (most high 8 bit of program memory address) into 8 bit immediate value of LDI or SUBI insn. */ BFD_RELOC_AVR_HH8_LDI_NEG, /* This is a 16 bit reloc for the AVR that stores 8 bit value (usually command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_LO8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit of command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HI8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit of command address) into 8 bit immediate value of LDI insn. */ BFD_RELOC_AVR_HH8_LDI_PM, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (usually command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_LO8_LDI_PM_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 8 bit of 16 bit command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HI8_LDI_PM_NEG, /* This is a 16 bit reloc for the AVR that stores negated 8 bit value (high 6 bit of 22 bit command address) into 8 bit immediate value of SUBI insn. */ BFD_RELOC_AVR_HH8_LDI_PM_NEG, /* This is a 32 bit reloc for the AVR that stores 23 bit value into 22 bits. */ BFD_RELOC_AVR_CALL, /* This is a 16 bit reloc for the AVR that stores all needed bits for absolute addressing with ldi with overflow check to linktime */ BFD_RELOC_AVR_LDI, /* This is a 6 bit reloc for the AVR that stores offset for ldd/std instructions */ BFD_RELOC_AVR_6, /* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw instructions */ BFD_RELOC_AVR_6_ADIW, /* Direct 12 bit. */ BFD_RELOC_390_12, /* 12 bit GOT offset. */ BFD_RELOC_390_GOT12, /* 32 bit PC relative PLT address. */ BFD_RELOC_390_PLT32, /* Copy symbol at runtime. */ BFD_RELOC_390_COPY, /* Create GOT entry. */ BFD_RELOC_390_GLOB_DAT, /* Create PLT entry. */ BFD_RELOC_390_JMP_SLOT, /* Adjust by program base. */ BFD_RELOC_390_RELATIVE, /* 32 bit PC relative offset to GOT. */ BFD_RELOC_390_GOTPC, /* 16 bit GOT offset. */ BFD_RELOC_390_GOT16, /* PC relative 16 bit shifted by 1. */ BFD_RELOC_390_PC16DBL, /* 16 bit PC rel. PLT shifted by 1. */ BFD_RELOC_390_PLT16DBL, /* PC relative 32 bit shifted by 1. */ BFD_RELOC_390_PC32DBL, /* 32 bit PC rel. PLT shifted by 1. */ BFD_RELOC_390_PLT32DBL, /* 32 bit PC rel. GOT shifted by 1. */ BFD_RELOC_390_GOTPCDBL, /* 64 bit GOT offset. */ BFD_RELOC_390_GOT64, /* 64 bit PC relative PLT address. */ BFD_RELOC_390_PLT64, /* 32 bit rel. offset to GOT entry. */ BFD_RELOC_390_GOTENT, /* 64 bit offset to GOT. */ BFD_RELOC_390_GOTOFF64, /* 12-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT12, /* 16-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT16, /* 32-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT32, /* 64-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLT64, /* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_390_GOTPLTENT, /* 16-bit rel. offset from the GOT to a PLT entry. 
*/ BFD_RELOC_390_PLTOFF16, /* 32-bit rel. offset from the GOT to a PLT entry. */ BFD_RELOC_390_PLTOFF32, /* 64-bit rel. offset from the GOT to a PLT entry. */ BFD_RELOC_390_PLTOFF64, /* s390 tls relocations. */ BFD_RELOC_390_TLS_LOAD, BFD_RELOC_390_TLS_GDCALL, BFD_RELOC_390_TLS_LDCALL, BFD_RELOC_390_TLS_GD32, BFD_RELOC_390_TLS_GD64, BFD_RELOC_390_TLS_GOTIE12, BFD_RELOC_390_TLS_GOTIE32, BFD_RELOC_390_TLS_GOTIE64, BFD_RELOC_390_TLS_LDM32, BFD_RELOC_390_TLS_LDM64, BFD_RELOC_390_TLS_IE32, BFD_RELOC_390_TLS_IE64, BFD_RELOC_390_TLS_IEENT, BFD_RELOC_390_TLS_LE32, BFD_RELOC_390_TLS_LE64, BFD_RELOC_390_TLS_LDO32, BFD_RELOC_390_TLS_LDO64, BFD_RELOC_390_TLS_DTPMOD, BFD_RELOC_390_TLS_DTPOFF, BFD_RELOC_390_TLS_TPOFF, /* Long displacement extension. */ BFD_RELOC_390_20, BFD_RELOC_390_GOT20, BFD_RELOC_390_GOTPLT20, BFD_RELOC_390_TLS_GOTIE20, /* Scenix IP2K - 9-bit register number / data address */ BFD_RELOC_IP2K_FR9, /* Scenix IP2K - 4-bit register/data bank number */ BFD_RELOC_IP2K_BANK, /* Scenix IP2K - low 13 bits of instruction word address */ BFD_RELOC_IP2K_ADDR16CJP, /* Scenix IP2K - high 3 bits of instruction word address */ BFD_RELOC_IP2K_PAGE3, /* Scenix IP2K - ext/low/high 8 bits of data address */ BFD_RELOC_IP2K_LO8DATA, BFD_RELOC_IP2K_HI8DATA, BFD_RELOC_IP2K_EX8DATA, /* Scenix IP2K - low/high 8 bits of instruction word address */ BFD_RELOC_IP2K_LO8INSN, BFD_RELOC_IP2K_HI8INSN, /* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */ BFD_RELOC_IP2K_PC_SKIP, /* Scenix IP2K - 16 bit word address in text section. */ BFD_RELOC_IP2K_TEXT, /* Scenix IP2K - 7-bit sp or dp offset */ BFD_RELOC_IP2K_FR_OFFSET, /* Scenix VPE4K coprocessor - data/insn-space addressing */ BFD_RELOC_VPE4KMATH_DATA, BFD_RELOC_VPE4KMATH_INSN, /* These two relocations are used by the linker to determine which of the entries in a C++ virtual function table are actually used. When the --gc-sections option is given, the linker will zero out the entries that are not used, so that the code for those functions need not be included in the output. VTABLE_INHERIT is a zero-space relocation used to describe to the linker the inheritance tree of a C++ virtual function table. The relocation's symbol should be the parent class' vtable, and the relocation should be located at the child vtable. VTABLE_ENTRY is a zero-space relocation that describes the use of a virtual function table entry. The reloc's symbol should refer to the table of the class mentioned in the code. Off of that base, an offset describes the entry that is being used. For Rela hosts, this offset is stored in the reloc's addend. For Rel hosts, we are forced to put this offset in the reloc's section offset. */ BFD_RELOC_VTABLE_INHERIT, BFD_RELOC_VTABLE_ENTRY, /* Intel IA64 Relocations. 
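
   (Aside, referring back to the VTABLE_INHERIT/VTABLE_ENTRY description
   above: a producer records the use of a vtable slot with an ordinary
   arelent whose symbol is the vtable and whose addend, on Rela hosts,
   is the slot offset.  The sketch below is only illustrative; sym_table,
   vtable_sym_index, use_site_offset and slot_offset are hypothetical,
   and bfd_reloc_type_lookup is declared a little further on in this
   header:

     arelent rel;
     rel.sym_ptr_ptr = &sym_table[vtable_sym_index];  // the class vtable
     rel.address     = use_site_offset;               // where the use occurs
     rel.addend      = slot_offset;                   // Rela hosts only
     rel.howto       = bfd_reloc_type_lookup (abfd, BFD_RELOC_VTABLE_ENTRY);

   The relocation occupies no space in the output; it exists only so the
   linker can decide which vtable entries to keep.)
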
*/ BFD_RELOC_IA64_IMM14, BFD_RELOC_IA64_IMM22, BFD_RELOC_IA64_IMM64, BFD_RELOC_IA64_DIR32MSB, BFD_RELOC_IA64_DIR32LSB, BFD_RELOC_IA64_DIR64MSB, BFD_RELOC_IA64_DIR64LSB, BFD_RELOC_IA64_GPREL22, BFD_RELOC_IA64_GPREL64I, BFD_RELOC_IA64_GPREL32MSB, BFD_RELOC_IA64_GPREL32LSB, BFD_RELOC_IA64_GPREL64MSB, BFD_RELOC_IA64_GPREL64LSB, BFD_RELOC_IA64_LTOFF22, BFD_RELOC_IA64_LTOFF64I, BFD_RELOC_IA64_PLTOFF22, BFD_RELOC_IA64_PLTOFF64I, BFD_RELOC_IA64_PLTOFF64MSB, BFD_RELOC_IA64_PLTOFF64LSB, BFD_RELOC_IA64_FPTR64I, BFD_RELOC_IA64_FPTR32MSB, BFD_RELOC_IA64_FPTR32LSB, BFD_RELOC_IA64_FPTR64MSB, BFD_RELOC_IA64_FPTR64LSB, BFD_RELOC_IA64_PCREL21B, BFD_RELOC_IA64_PCREL21BI, BFD_RELOC_IA64_PCREL21M, BFD_RELOC_IA64_PCREL21F, BFD_RELOC_IA64_PCREL22, BFD_RELOC_IA64_PCREL60B, BFD_RELOC_IA64_PCREL64I, BFD_RELOC_IA64_PCREL32MSB, BFD_RELOC_IA64_PCREL32LSB, BFD_RELOC_IA64_PCREL64MSB, BFD_RELOC_IA64_PCREL64LSB, BFD_RELOC_IA64_LTOFF_FPTR22, BFD_RELOC_IA64_LTOFF_FPTR64I, BFD_RELOC_IA64_LTOFF_FPTR32MSB, BFD_RELOC_IA64_LTOFF_FPTR32LSB, BFD_RELOC_IA64_LTOFF_FPTR64MSB, BFD_RELOC_IA64_LTOFF_FPTR64LSB, BFD_RELOC_IA64_SEGREL32MSB, BFD_RELOC_IA64_SEGREL32LSB, BFD_RELOC_IA64_SEGREL64MSB, BFD_RELOC_IA64_SEGREL64LSB, BFD_RELOC_IA64_SECREL32MSB, BFD_RELOC_IA64_SECREL32LSB, BFD_RELOC_IA64_SECREL64MSB, BFD_RELOC_IA64_SECREL64LSB, BFD_RELOC_IA64_REL32MSB, BFD_RELOC_IA64_REL32LSB, BFD_RELOC_IA64_REL64MSB, BFD_RELOC_IA64_REL64LSB, BFD_RELOC_IA64_LTV32MSB, BFD_RELOC_IA64_LTV32LSB, BFD_RELOC_IA64_LTV64MSB, BFD_RELOC_IA64_LTV64LSB, BFD_RELOC_IA64_IPLTMSB, BFD_RELOC_IA64_IPLTLSB, BFD_RELOC_IA64_COPY, BFD_RELOC_IA64_LTOFF22X, BFD_RELOC_IA64_LDXMOV, BFD_RELOC_IA64_TPREL14, BFD_RELOC_IA64_TPREL22, BFD_RELOC_IA64_TPREL64I, BFD_RELOC_IA64_TPREL64MSB, BFD_RELOC_IA64_TPREL64LSB, BFD_RELOC_IA64_LTOFF_TPREL22, BFD_RELOC_IA64_DTPMOD64MSB, BFD_RELOC_IA64_DTPMOD64LSB, BFD_RELOC_IA64_LTOFF_DTPMOD22, BFD_RELOC_IA64_DTPREL14, BFD_RELOC_IA64_DTPREL22, BFD_RELOC_IA64_DTPREL64I, BFD_RELOC_IA64_DTPREL32MSB, BFD_RELOC_IA64_DTPREL32LSB, BFD_RELOC_IA64_DTPREL64MSB, BFD_RELOC_IA64_DTPREL64LSB, BFD_RELOC_IA64_LTOFF_DTPREL22, /* Motorola 68HC11 reloc. This is the 8 bit high part of an absolute address. */ BFD_RELOC_M68HC11_HI8, /* Motorola 68HC11 reloc. This is the 8 bit low part of an absolute address. */ BFD_RELOC_M68HC11_LO8, /* Motorola 68HC11 reloc. This is the 3 bit of a value. */ BFD_RELOC_M68HC11_3B, /* Motorola 68HC11 reloc. This reloc marks the beginning of a jump/call instruction. It is used for linker relaxation to correctly identify beginning of instruction and change some branches to use PC-relative addressing mode. */ BFD_RELOC_M68HC11_RL_JUMP, /* Motorola 68HC11 reloc. This reloc marks a group of several instructions that gcc generates and for which the linker relaxation pass can modify and/or remove some of them. */ BFD_RELOC_M68HC11_RL_GROUP, /* Motorola 68HC11 reloc. This is the 16-bit lower part of an address. It is used for 'call' instruction to specify the symbol address without any special transformation (due to memory bank window). */ BFD_RELOC_M68HC11_LO16, /* Motorola 68HC11 reloc. This is a 8-bit reloc that specifies the page number of an address. It is used by 'call' instruction to specify the page number of the symbol. */ BFD_RELOC_M68HC11_PAGE, /* Motorola 68HC11 reloc. This is a 24-bit reloc that represents the address with a 16-bit value and a 8-bit page number. The symbol address is transformed to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */ BFD_RELOC_M68HC11_24, /* Motorola 68HC12 reloc. 
This is the 5 bits of a value. */ BFD_RELOC_M68HC12_5B, /* NS CR16C Relocations. */ BFD_RELOC_16C_NUM08, BFD_RELOC_16C_NUM08_C, BFD_RELOC_16C_NUM16, BFD_RELOC_16C_NUM16_C, BFD_RELOC_16C_NUM32, BFD_RELOC_16C_NUM32_C, BFD_RELOC_16C_DISP04, BFD_RELOC_16C_DISP04_C, BFD_RELOC_16C_DISP08, BFD_RELOC_16C_DISP08_C, BFD_RELOC_16C_DISP16, BFD_RELOC_16C_DISP16_C, BFD_RELOC_16C_DISP24, BFD_RELOC_16C_DISP24_C, BFD_RELOC_16C_DISP24a, BFD_RELOC_16C_DISP24a_C, BFD_RELOC_16C_REG04, BFD_RELOC_16C_REG04_C, BFD_RELOC_16C_REG04a, BFD_RELOC_16C_REG04a_C, BFD_RELOC_16C_REG14, BFD_RELOC_16C_REG14_C, BFD_RELOC_16C_REG16, BFD_RELOC_16C_REG16_C, BFD_RELOC_16C_REG20, BFD_RELOC_16C_REG20_C, BFD_RELOC_16C_ABS20, BFD_RELOC_16C_ABS20_C, BFD_RELOC_16C_ABS24, BFD_RELOC_16C_ABS24_C, BFD_RELOC_16C_IMM04, BFD_RELOC_16C_IMM04_C, BFD_RELOC_16C_IMM16, BFD_RELOC_16C_IMM16_C, BFD_RELOC_16C_IMM20, BFD_RELOC_16C_IMM20_C, BFD_RELOC_16C_IMM24, BFD_RELOC_16C_IMM24_C, BFD_RELOC_16C_IMM32, BFD_RELOC_16C_IMM32_C, /* NS CRX Relocations. */ BFD_RELOC_CRX_REL4, BFD_RELOC_CRX_REL8, BFD_RELOC_CRX_REL8_CMP, BFD_RELOC_CRX_REL16, BFD_RELOC_CRX_REL24, BFD_RELOC_CRX_REL32, BFD_RELOC_CRX_REGREL12, BFD_RELOC_CRX_REGREL22, BFD_RELOC_CRX_REGREL28, BFD_RELOC_CRX_REGREL32, BFD_RELOC_CRX_ABS16, BFD_RELOC_CRX_ABS32, BFD_RELOC_CRX_NUM8, BFD_RELOC_CRX_NUM16, BFD_RELOC_CRX_NUM32, BFD_RELOC_CRX_IMM16, BFD_RELOC_CRX_IMM32, BFD_RELOC_CRX_SWITCH8, BFD_RELOC_CRX_SWITCH16, BFD_RELOC_CRX_SWITCH32, /* These relocs are only used within the CRIS assembler. They are not (at present) written to any object files. */ BFD_RELOC_CRIS_BDISP8, BFD_RELOC_CRIS_UNSIGNED_5, BFD_RELOC_CRIS_SIGNED_6, BFD_RELOC_CRIS_UNSIGNED_6, BFD_RELOC_CRIS_SIGNED_8, BFD_RELOC_CRIS_UNSIGNED_8, BFD_RELOC_CRIS_SIGNED_16, BFD_RELOC_CRIS_UNSIGNED_16, BFD_RELOC_CRIS_LAPCQ_OFFSET, BFD_RELOC_CRIS_UNSIGNED_4, /* Relocs used in ELF shared libraries for CRIS. */ BFD_RELOC_CRIS_COPY, BFD_RELOC_CRIS_GLOB_DAT, BFD_RELOC_CRIS_JUMP_SLOT, BFD_RELOC_CRIS_RELATIVE, /* 32-bit offset to symbol-entry within GOT. */ BFD_RELOC_CRIS_32_GOT, /* 16-bit offset to symbol-entry within GOT. */ BFD_RELOC_CRIS_16_GOT, /* 32-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_CRIS_32_GOTPLT, /* 16-bit offset to symbol-entry within GOT, with PLT handling. */ BFD_RELOC_CRIS_16_GOTPLT, /* 32-bit offset to symbol, relative to GOT. */ BFD_RELOC_CRIS_32_GOTREL, /* 32-bit offset to symbol with PLT entry, relative to GOT. */ BFD_RELOC_CRIS_32_PLT_GOTREL, /* 32-bit offset to symbol with PLT entry, relative to this relocation. */ BFD_RELOC_CRIS_32_PLT_PCREL, /* Intel i860 Relocations. */ BFD_RELOC_860_COPY, BFD_RELOC_860_GLOB_DAT, BFD_RELOC_860_JUMP_SLOT, BFD_RELOC_860_RELATIVE, BFD_RELOC_860_PC26, BFD_RELOC_860_PLT26, BFD_RELOC_860_PC16, BFD_RELOC_860_LOW0, BFD_RELOC_860_SPLIT0, BFD_RELOC_860_LOW1, BFD_RELOC_860_SPLIT1, BFD_RELOC_860_LOW2, BFD_RELOC_860_SPLIT2, BFD_RELOC_860_LOW3, BFD_RELOC_860_LOGOT0, BFD_RELOC_860_SPGOT0, BFD_RELOC_860_LOGOT1, BFD_RELOC_860_SPGOT1, BFD_RELOC_860_LOGOTOFF0, BFD_RELOC_860_SPGOTOFF0, BFD_RELOC_860_LOGOTOFF1, BFD_RELOC_860_SPGOTOFF1, BFD_RELOC_860_LOGOTOFF2, BFD_RELOC_860_LOGOTOFF3, BFD_RELOC_860_LOPC, BFD_RELOC_860_HIGHADJ, BFD_RELOC_860_HAGOT, BFD_RELOC_860_HAGOTOFF, BFD_RELOC_860_HAPC, BFD_RELOC_860_HIGH, BFD_RELOC_860_HIGOT, BFD_RELOC_860_HIGOTOFF, /* OpenRISC Relocations. */ BFD_RELOC_OPENRISC_ABS_26, BFD_RELOC_OPENRISC_REL_26, /* H8 elf Relocations. 
*/ BFD_RELOC_H8_DIR16A8, BFD_RELOC_H8_DIR16R8, BFD_RELOC_H8_DIR24A8, BFD_RELOC_H8_DIR24R8, BFD_RELOC_H8_DIR32A16, /* Sony Xstormy16 Relocations. */ BFD_RELOC_XSTORMY16_REL_12, BFD_RELOC_XSTORMY16_12, BFD_RELOC_XSTORMY16_24, BFD_RELOC_XSTORMY16_FPTR16, /* Relocations used by VAX ELF. */ BFD_RELOC_VAX_GLOB_DAT, BFD_RELOC_VAX_JMP_SLOT, BFD_RELOC_VAX_RELATIVE, /* Morpho MS1 - 16 bit immediate relocation. */ BFD_RELOC_MS1_PC16, /* Morpho MS1 - Hi 16 bits of an address. */ BFD_RELOC_MS1_HI16, /* Morpho MS1 - Low 16 bits of an address. */ BFD_RELOC_MS1_LO16, /* Morpho MS1 - Used to tell the linker which vtable entries are used. */ BFD_RELOC_MS1_GNU_VTINHERIT, /* Morpho MS1 - Used to tell the linker which vtable entries are used. */ BFD_RELOC_MS1_GNU_VTENTRY, /* msp430 specific relocation codes */ BFD_RELOC_MSP430_10_PCREL, BFD_RELOC_MSP430_16_PCREL, BFD_RELOC_MSP430_16, BFD_RELOC_MSP430_16_PCREL_BYTE, BFD_RELOC_MSP430_16_BYTE, BFD_RELOC_MSP430_2X_PCREL, BFD_RELOC_MSP430_RL_PCREL, /* IQ2000 Relocations. */ BFD_RELOC_IQ2000_OFFSET_16, BFD_RELOC_IQ2000_OFFSET_21, BFD_RELOC_IQ2000_UHI16, /* Special Xtensa relocation used only by PLT entries in ELF shared objects to indicate that the runtime linker should set the value to one of its own internal functions or data structures. */ BFD_RELOC_XTENSA_RTLD, /* Xtensa relocations for ELF shared objects. */ BFD_RELOC_XTENSA_GLOB_DAT, BFD_RELOC_XTENSA_JMP_SLOT, BFD_RELOC_XTENSA_RELATIVE, /* Xtensa relocation used in ELF object files for symbols that may require PLT entries. Otherwise, this is just a generic 32-bit relocation. */ BFD_RELOC_XTENSA_PLT, /* Xtensa relocations to mark the difference of two local symbols. These are only needed to support linker relaxation and can be ignored when not relaxing. The field is set to the value of the difference assuming no relaxation. The relocation encodes the position of the first symbol so the linker can determine whether to adjust the field value. */ BFD_RELOC_XTENSA_DIFF8, BFD_RELOC_XTENSA_DIFF16, BFD_RELOC_XTENSA_DIFF32, /* Generic Xtensa relocations for instruction operands. Only the slot number is encoded in the relocation. The relocation applies to the last PC-relative immediate operand, or if there are no PC-relative immediates, to the last immediate operand. */ BFD_RELOC_XTENSA_SLOT0_OP, BFD_RELOC_XTENSA_SLOT1_OP, BFD_RELOC_XTENSA_SLOT2_OP, BFD_RELOC_XTENSA_SLOT3_OP, BFD_RELOC_XTENSA_SLOT4_OP, BFD_RELOC_XTENSA_SLOT5_OP, BFD_RELOC_XTENSA_SLOT6_OP, BFD_RELOC_XTENSA_SLOT7_OP, BFD_RELOC_XTENSA_SLOT8_OP, BFD_RELOC_XTENSA_SLOT9_OP, BFD_RELOC_XTENSA_SLOT10_OP, BFD_RELOC_XTENSA_SLOT11_OP, BFD_RELOC_XTENSA_SLOT12_OP, BFD_RELOC_XTENSA_SLOT13_OP, BFD_RELOC_XTENSA_SLOT14_OP, /* Alternate Xtensa relocations. Only the slot is encoded in the relocation. The meaning of these relocations is opcode-specific. */ BFD_RELOC_XTENSA_SLOT0_ALT, BFD_RELOC_XTENSA_SLOT1_ALT, BFD_RELOC_XTENSA_SLOT2_ALT, BFD_RELOC_XTENSA_SLOT3_ALT, BFD_RELOC_XTENSA_SLOT4_ALT, BFD_RELOC_XTENSA_SLOT5_ALT, BFD_RELOC_XTENSA_SLOT6_ALT, BFD_RELOC_XTENSA_SLOT7_ALT, BFD_RELOC_XTENSA_SLOT8_ALT, BFD_RELOC_XTENSA_SLOT9_ALT, BFD_RELOC_XTENSA_SLOT10_ALT, BFD_RELOC_XTENSA_SLOT11_ALT, BFD_RELOC_XTENSA_SLOT12_ALT, BFD_RELOC_XTENSA_SLOT13_ALT, BFD_RELOC_XTENSA_SLOT14_ALT, /* Xtensa relocations for backward compatibility. These have all been replaced by BFD_RELOC_XTENSA_SLOT0_OP. */ BFD_RELOC_XTENSA_OP0, BFD_RELOC_XTENSA_OP1, BFD_RELOC_XTENSA_OP2, /* Xtensa relocation to mark that the assembler expanded the instructions from an original target. 
The expansion size is encoded in the reloc size. */ BFD_RELOC_XTENSA_ASM_EXPAND, /* Xtensa relocation to mark that the linker should simplify assembler-expanded instructions. This is commonly used internally by the linker after analysis of a BFD_RELOC_XTENSA_ASM_EXPAND. */ BFD_RELOC_XTENSA_ASM_SIMPLIFY, BFD_RELOC_UNUSED }; typedef enum bfd_reloc_code_real bfd_reloc_code_real_type; reloc_howto_type *bfd_reloc_type_lookup (bfd *abfd, bfd_reloc_code_real_type code); const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code); /* Extracted from syms.c. */ typedef struct bfd_symbol { /* A pointer to the BFD which owns the symbol. This information is necessary so that a back end can work out what additional information (invisible to the application writer) is carried with the symbol. This field is *almost* redundant, since you can use section->owner instead, except that some symbols point to the global sections bfd_{abs,com,und}_section. This could be fixed by making these globals be per-bfd (or per-target-flavor). FIXME. */ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */ /* The text of the symbol. The name is left alone, and not copied; the application may not alter it. */ const char *name; /* The value of the symbol. This really should be a union of a numeric value with a pointer, since some flags indicate that a pointer to another symbol is stored here. */ symvalue value; /* Attributes of a symbol. */ #define BSF_NO_FLAGS 0x00 /* The symbol has local scope; <> in <>. The value is the offset into the section of the data. */ #define BSF_LOCAL 0x01 /* The symbol has global scope; initialized data in <>. The value is the offset into the section of the data. */ #define BSF_GLOBAL 0x02 /* The symbol has global scope and is exported. The value is the offset into the section of the data. */ #define BSF_EXPORT BSF_GLOBAL /* No real difference. */ /* A normal C symbol would be one of: <>, <>, <> or <>. */ /* The symbol is a debugging record. The value has an arbitrary meaning, unless BSF_DEBUGGING_RELOC is also set. */ #define BSF_DEBUGGING 0x08 /* The symbol denotes a function entry point. Used in ELF, perhaps others someday. */ #define BSF_FUNCTION 0x10 /* Used by the linker. */ #define BSF_KEEP 0x20 #define BSF_KEEP_G 0x40 /* A weak global symbol, overridable without warnings by a regular global symbol of the same name. */ #define BSF_WEAK 0x80 /* This symbol was created to point to a section, e.g. ELF's STT_SECTION symbols. */ #define BSF_SECTION_SYM 0x100 /* The symbol used to be a common symbol, but now it is allocated. */ #define BSF_OLD_COMMON 0x200 /* The default value for common data. */ #define BFD_FORT_COMM_DEFAULT_VALUE 0 /* In some files the type of a symbol sometimes alters its location in an output file - ie in coff a <> symbol which is also <> symbol appears where it was declared and not at the end of a section. This bit is set by the target BFD part to convey this information. */ #define BSF_NOT_AT_END 0x400 /* Signal that the symbol is the label of constructor section. */ #define BSF_CONSTRUCTOR 0x800 /* Signal that the symbol is a warning symbol. The name is a warning. The name of the next symbol is the one to warn about; if a reference is made to a symbol with the same name as the next symbol, a warning is issued by the linker. */ #define BSF_WARNING 0x1000 /* Signal that the symbol is indirect. This symbol is an indirect pointer to the symbol with the same name as the next symbol. 
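
   (Aside, referring back to the relocation codes and the lookup
   functions declared just after them: an assembler front end usually
   maps one of the generic BFD_RELOC_* codes to the target's howto entry
   at the point a fixup is emitted.  A minimal sketch, where abfd is an
   already opened output BFD and error handling is omitted:

     reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
     if (howto != NULL)
       {
         const char *code_name = bfd_get_reloc_code_name (BFD_RELOC_32);
         unsigned int nbytes   = bfd_get_reloc_size (howto);
         // code_name is the enumerator's printable name and nbytes the
         // width in bytes of the field the howto operates on.
       }

   A NULL result simply means the target has no relocation for that
   code.)
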
*/ #define BSF_INDIRECT 0x2000 /* BSF_FILE marks symbols that contain a file name. This is used for ELF STT_FILE symbols. */ #define BSF_FILE 0x4000 /* Symbol is from dynamic linking information. */ #define BSF_DYNAMIC 0x8000 /* The symbol denotes a data object. Used in ELF, and perhaps others someday. */ #define BSF_OBJECT 0x10000 /* This symbol is a debugging symbol. The value is the offset into the section of the data. BSF_DEBUGGING should be set as well. */ #define BSF_DEBUGGING_RELOC 0x20000 /* This symbol is thread local. Used in ELF. */ #define BSF_THREAD_LOCAL 0x40000 flagword flags; /* A pointer to the section to which this symbol is relative. This will always be non NULL, there are special sections for undefined and absolute symbols. */ struct bfd_section *section; /* Back end special data. */ union { void *p; bfd_vma i; } udata; } asymbol; #define bfd_get_symtab_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd)) bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym); bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name); #define bfd_is_local_label_name(abfd, name) \ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name)) bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym); #define bfd_is_target_special_symbol(abfd, sym) \ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym)) #define bfd_canonicalize_symtab(abfd, location) \ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location)) bfd_boolean bfd_set_symtab (bfd *abfd, asymbol **location, unsigned int count); void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol); #define bfd_make_empty_symbol(abfd) \ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd)) asymbol *_bfd_generic_make_empty_symbol (bfd *); #define bfd_make_debug_symbol(abfd,ptr,size) \ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size)) int bfd_decode_symclass (asymbol *symbol); bfd_boolean bfd_is_undefined_symclass (int symclass); void bfd_symbol_info (asymbol *symbol, symbol_info *ret); bfd_boolean bfd_copy_private_symbol_data (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym); #define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \ (ibfd, isymbol, obfd, osymbol)) /* Extracted from bfd.c. */ struct bfd { /* A unique identifier of the BFD */ unsigned int id; /* The filename the application opened the BFD with. */ const char *filename; /* A pointer to the target jump table. */ const struct bfd_target *xvec; /* The IOSTREAM, and corresponding IO vector that provide access to the file backing the BFD. */ void *iostream; const struct bfd_iovec *iovec; /* Is the file descriptor being cached? That is, can it be closed as needed, and re-opened when accessed later? */ bfd_boolean cacheable; /* Marks whether there was a default target specified when the BFD was opened. This is used to select which matching algorithm to use to choose the back end. */ bfd_boolean target_defaulted; /* The caching routines use these to maintain a least-recently-used list of BFDs. */ struct bfd *lru_prev, *lru_next; /* When a file is closed by the caching routines, BFD retains state information on the file here... */ ufile_ptr where; /* ... and here: (``once'' means at least once). */ bfd_boolean opened_once; /* Set if we have a locally maintained mtime value, rather than getting it from the file each time. */ bfd_boolean mtime_set; /* File modified time, if mtime_set is TRUE. */ long mtime; /* Reserved for an unimplemented file locking extension. 
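
   (Aside, using the symbol-table entry points declared just before this
   structure: the usual reading sequence is upper bound, allocate,
   canonicalize.  A minimal sketch, assuming malloc/free are available
   and with error handling omitted; abfd is an already opened object:

     long storage = bfd_get_symtab_upper_bound (abfd);
     if (storage > 0)
       {
         asymbol **symtab = (asymbol **) malloc (storage);
         long count = bfd_canonicalize_symtab (abfd, symtab);
         long i;
         for (i = 0; i < count; i++)
           if (symtab[i]->flags & BSF_GLOBAL)
             ;   // symtab[i]->name, ->value and ->section describe it
         free (symtab);
       }

   The canonical table is a NULL-terminated array of pointers, so the
   count entries plus a trailing NULL fit in the storage returned
   above.)
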
*/ int ifd; /* The format which belongs to the BFD. (object, core, etc.) */ bfd_format format; /* The direction with which the BFD was opened. */ enum bfd_direction { no_direction = 0, read_direction = 1, write_direction = 2, both_direction = 3 } direction; /* Format_specific flags. */ flagword flags; /* Currently my_archive is tested before adding origin to anything. I believe that this can become always an add of origin, with origin set to 0 for non archive files. */ ufile_ptr origin; /* Remember when output has begun, to stop strange things from happening. */ bfd_boolean output_has_begun; /* A hash table for section names. */ struct bfd_hash_table section_htab; /* Pointer to linked list of sections. */ struct bfd_section *sections; /* The last section on the section list. */ struct bfd_section *section_last; /* The number of sections. */ unsigned int section_count; /* Stuff only useful for object files: The start address. */ bfd_vma start_address; /* Used for input and output. */ unsigned int symcount; /* Symbol table for output BFD (with symcount entries). */ struct bfd_symbol **outsymbols; /* Used for slurped dynamic symbol tables. */ unsigned int dynsymcount; /* Pointer to structure which contains architecture information. */ const struct bfd_arch_info *arch_info; /* Flag set if symbols from this BFD should not be exported. */ bfd_boolean no_export; /* Stuff only useful for archives. */ void *arelt_data; struct bfd *my_archive; /* The containing archive BFD. */ struct bfd *next; /* The next BFD in the archive. */ struct bfd *archive_head; /* The first BFD in the archive. */ bfd_boolean has_armap; /* A chain of BFD structures involved in a link. */ struct bfd *link_next; /* A field used by _bfd_generic_link_add_archive_symbols. This will be used only for archive elements. */ int archive_pass; /* Used by the back end to hold private data. */ union { struct aout_data_struct *aout_data; struct artdata *aout_ar_data; struct _oasys_data *oasys_obj_data; struct _oasys_ar_data *oasys_ar_data; struct coff_tdata *coff_obj_data; struct pe_tdata *pe_obj_data; struct xcoff_tdata *xcoff_obj_data; struct ecoff_tdata *ecoff_obj_data; struct ieee_data_struct *ieee_data; struct ieee_ar_data_struct *ieee_ar_data; struct srec_data_struct *srec_data; struct ihex_data_struct *ihex_data; struct tekhex_data_struct *tekhex_data; struct elf_obj_tdata *elf_obj_data; struct nlm_obj_tdata *nlm_obj_data; struct bout_data_struct *bout_data; struct mmo_data_struct *mmo_data; struct sun_core_struct *sun_core_data; struct sco5_core_struct *sco5_core_data; struct trad_core_struct *trad_core_data; struct som_data_struct *som_data; struct hpux_core_struct *hpux_core_data; struct hppabsd_core_struct *hppabsd_core_data; struct sgi_core_struct *sgi_core_data; struct lynx_core_struct *lynx_core_data; struct osf_core_struct *osf_core_data; struct cisco_core_struct *cisco_core_data; struct versados_data_struct *versados_data; struct netbsd_core_struct *netbsd_core_data; struct mach_o_data_struct *mach_o_data; struct mach_o_fat_data_struct *mach_o_fat_data; struct bfd_pef_data_struct *pef_data; struct bfd_pef_xlib_data_struct *pef_xlib_data; struct bfd_sym_data_struct *sym_data; void *any; } tdata; /* Used by the application to hold private data. */ void *usrdata; /* Where all the allocated stuff under this BFD goes. This is a struct objalloc *, but we use void * to avoid requiring the inclusion of objalloc.h. 
*/ void *memory; }; typedef enum bfd_error { bfd_error_no_error = 0, bfd_error_system_call, bfd_error_invalid_target, bfd_error_wrong_format, bfd_error_wrong_object_format, bfd_error_invalid_operation, bfd_error_no_memory, bfd_error_no_symbols, bfd_error_no_armap, bfd_error_no_more_archived_files, bfd_error_malformed_archive, bfd_error_file_not_recognized, bfd_error_file_ambiguously_recognized, bfd_error_no_contents, bfd_error_nonrepresentable_section, bfd_error_no_debug_section, bfd_error_bad_value, bfd_error_file_truncated, bfd_error_file_too_big, bfd_error_invalid_error_code } bfd_error_type; bfd_error_type bfd_get_error (void); void bfd_set_error (bfd_error_type error_tag); const char *bfd_errmsg (bfd_error_type error_tag); void bfd_perror (const char *message); typedef void (*bfd_error_handler_type) (const char *, ...); bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type); void bfd_set_error_program_name (const char *); bfd_error_handler_type bfd_get_error_handler (void); long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect); long bfd_canonicalize_reloc (bfd *abfd, asection *sec, arelent **loc, asymbol **syms); void bfd_set_reloc (bfd *abfd, asection *sec, arelent **rel, unsigned int count); bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags); int bfd_get_arch_size (bfd *abfd); int bfd_get_sign_extend_vma (bfd *abfd); bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma); unsigned int bfd_get_gp_size (bfd *abfd); void bfd_set_gp_size (bfd *abfd, unsigned int i); bfd_vma bfd_scan_vma (const char *string, const char **end, int base); bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd); #define bfd_copy_private_header_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_copy_private_header_data, \ (ibfd, obfd)) bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd); #define bfd_copy_private_bfd_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \ (ibfd, obfd)) bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd); #define bfd_merge_private_bfd_data(ibfd, obfd) \ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \ (ibfd, obfd)) bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags); #define bfd_set_private_flags(abfd, flags) \ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags)) #define bfd_sizeof_headers(abfd, reloc) \ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc)) #define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \ BFD_SEND (abfd, _bfd_find_nearest_line, \ (abfd, sec, syms, off, file, func, line)) #define bfd_find_line(abfd, syms, sym, file, line) \ BFD_SEND (abfd, _bfd_find_line, \ (abfd, syms, sym, file, line)) #define bfd_find_inliner_info(abfd, file, func, line) \ BFD_SEND (abfd, _bfd_find_inliner_info, \ (abfd, file, func, line)) #define bfd_debug_info_start(abfd) \ BFD_SEND (abfd, _bfd_debug_info_start, (abfd)) #define bfd_debug_info_end(abfd) \ BFD_SEND (abfd, _bfd_debug_info_end, (abfd)) #define bfd_debug_info_accumulate(abfd, section) \ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section)) #define bfd_stat_arch_elt(abfd, stat) \ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat)) #define bfd_update_armap_timestamp(abfd) \ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd)) #define bfd_set_arch_mach(abfd, arch, mach)\ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach)) #define bfd_relax_section(abfd, section, link_info, again) \ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again)) #define bfd_gc_sections(abfd, link_info) \ BFD_SEND (abfd, _bfd_gc_sections, (abfd, 
link_info)) #define bfd_merge_sections(abfd, link_info) \ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info)) #define bfd_is_group_section(abfd, sec) \ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec)) #define bfd_discard_group(abfd, sec) \ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec)) #define bfd_link_hash_table_create(abfd) \ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd)) #define bfd_link_hash_table_free(abfd, hash) \ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash)) #define bfd_link_add_symbols(abfd, info) \ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info)) #define bfd_link_just_syms(abfd, sec, info) \ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info)) #define bfd_final_link(abfd, info) \ BFD_SEND (abfd, _bfd_final_link, (abfd, info)) #define bfd_free_cached_info(abfd) \ BFD_SEND (abfd, _bfd_free_cached_info, (abfd)) #define bfd_get_dynamic_symtab_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd)) #define bfd_print_private_bfd_data(abfd, file)\ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file)) #define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols)) #define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \ dyncount, dynsyms, ret)) #define bfd_get_dynamic_reloc_upper_bound(abfd) \ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd)) #define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms)) extern bfd_byte *bfd_get_relocated_section_contents (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, bfd_boolean, asymbol **); bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative); struct bfd_preserve { void *marker; void *tdata; flagword flags; const struct bfd_arch_info *arch_info; struct bfd_section *sections; struct bfd_section *section_last; unsigned int section_count; struct bfd_hash_table section_htab; }; bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *); void bfd_preserve_restore (bfd *, struct bfd_preserve *); void bfd_preserve_finish (bfd *, struct bfd_preserve *); /* Extracted from archive.c. */ symindex bfd_get_next_mapent (bfd *abfd, symindex previous, carsym **sym); bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head); bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous); /* Extracted from corefile.c. */ const char *bfd_core_file_failing_command (bfd *abfd); int bfd_core_file_failing_signal (bfd *abfd); bfd_boolean core_file_matches_executable_p (bfd *core_bfd, bfd *exec_bfd); /* Extracted from targets.c. */ #define BFD_SEND(bfd, message, arglist) \ ((*((bfd)->xvec->message)) arglist) #ifdef DEBUG_BFD_SEND #undef BFD_SEND #define BFD_SEND(bfd, message, arglist) \ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \ ((*((bfd)->xvec->message)) arglist) : \ (bfd_assert (__FILE__,__LINE__), NULL)) #endif #define BFD_SEND_FMT(bfd, message, arglist) \ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) #ifdef DEBUG_BFD_SEND #undef BFD_SEND_FMT #define BFD_SEND_FMT(bfd, message, arglist) \ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? 
\ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \ (bfd_assert (__FILE__,__LINE__), NULL)) #endif enum bfd_flavour { bfd_target_unknown_flavour, bfd_target_aout_flavour, bfd_target_coff_flavour, bfd_target_ecoff_flavour, bfd_target_xcoff_flavour, bfd_target_elf_flavour, bfd_target_ieee_flavour, bfd_target_nlm_flavour, bfd_target_oasys_flavour, bfd_target_tekhex_flavour, bfd_target_srec_flavour, bfd_target_ihex_flavour, bfd_target_som_flavour, bfd_target_os9k_flavour, bfd_target_versados_flavour, bfd_target_msdos_flavour, bfd_target_ovax_flavour, bfd_target_evax_flavour, bfd_target_mmo_flavour, bfd_target_mach_o_flavour, bfd_target_pef_flavour, bfd_target_pef_xlib_flavour, bfd_target_sym_flavour }; enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN }; /* Forward declaration. */ typedef struct bfd_link_info _bfd_link_info; typedef struct bfd_target { /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */ char *name; /* The "flavour" of a back end is a general indication about the contents of a file. */ enum bfd_flavour flavour; /* The order of bytes within the data area of a file. */ enum bfd_endian byteorder; /* The order of bytes within the header parts of a file. */ enum bfd_endian header_byteorder; /* A mask of all the flags which an executable may have set - from the set <>, <>, ...<>. */ flagword object_flags; /* A mask of all the flags which a section may have set - from the set <>, <>, ...<>. */ flagword section_flags; /* The character normally found at the front of a symbol. (if any), perhaps `_'. */ char symbol_leading_char; /* The pad character for file names within an archive header. */ char ar_pad_char; /* The maximum number of characters in an archive header. */ unsigned short ar_max_namelen; /* Entries for byte swapping for data. These are different from the other entry points, since they don't take a BFD as the first argument. Certain other handlers could do the same. */ bfd_uint64_t (*bfd_getx64) (const void *); bfd_int64_t (*bfd_getx_signed_64) (const void *); void (*bfd_putx64) (bfd_uint64_t, void *); bfd_vma (*bfd_getx32) (const void *); bfd_signed_vma (*bfd_getx_signed_32) (const void *); void (*bfd_putx32) (bfd_vma, void *); bfd_vma (*bfd_getx16) (const void *); bfd_signed_vma (*bfd_getx_signed_16) (const void *); void (*bfd_putx16) (bfd_vma, void *); /* Byte swapping for the headers. */ bfd_uint64_t (*bfd_h_getx64) (const void *); bfd_int64_t (*bfd_h_getx_signed_64) (const void *); void (*bfd_h_putx64) (bfd_uint64_t, void *); bfd_vma (*bfd_h_getx32) (const void *); bfd_signed_vma (*bfd_h_getx_signed_32) (const void *); void (*bfd_h_putx32) (bfd_vma, void *); bfd_vma (*bfd_h_getx16) (const void *); bfd_signed_vma (*bfd_h_getx_signed_16) (const void *); void (*bfd_h_putx16) (bfd_vma, void *); /* Format dependent routines: these are vectors of entry points within the target vector structure, one for each format to check. */ /* Check the format of a file being read. Return a <> or zero. */ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *); /* Set the format of a file being written. */ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *); /* Write cached information into a file being written, at <>. */ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *); /* Generic entry points. 
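 *
 * Illustrative note on the BFD_JUMP_TABLE_* initializer macros defined
 * just below (the "coff" prefix is only an example of a back-end name):
 * each macro pastes the prefix onto one slot name per function pointer,
 * so an entry such as
 *
 *	BFD_JUMP_TABLE_GENERIC (coff)
 *
 * inside a bfd_target initializer supplies, in declaration order,
 *
 *	coff_close_and_cleanup,
 *	coff_bfd_free_cached_info,
 *	coff_new_section_hook,
 *	coff_get_section_contents,
 *	coff_get_section_contents_in_window
 *
 * which is how each back end fills in its own implementations.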
*/ #define BFD_JUMP_TABLE_GENERIC(NAME) \ NAME##_close_and_cleanup, \ NAME##_bfd_free_cached_info, \ NAME##_new_section_hook, \ NAME##_get_section_contents, \ NAME##_get_section_contents_in_window /* Called when the BFD is being closed to do any necessary cleanup. */ bfd_boolean (*_close_and_cleanup) (bfd *); /* Ask the BFD to free all cached information. */ bfd_boolean (*_bfd_free_cached_info) (bfd *); /* Called when a new section is created. */ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr); /* Read the contents of a section. */ bfd_boolean (*_bfd_get_section_contents) (bfd *, sec_ptr, void *, file_ptr, bfd_size_type); bfd_boolean (*_bfd_get_section_contents_in_window) (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type); /* Entry points to copy private data. */ #define BFD_JUMP_TABLE_COPY(NAME) \ NAME##_bfd_copy_private_bfd_data, \ NAME##_bfd_merge_private_bfd_data, \ NAME##_bfd_copy_private_section_data, \ NAME##_bfd_copy_private_symbol_data, \ NAME##_bfd_copy_private_header_data, \ NAME##_bfd_set_private_flags, \ NAME##_bfd_print_private_bfd_data /* Called to copy BFD general private data from one object file to another. */ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *); /* Called to merge BFD general private data from one object file to a common output file when linking. */ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *); /* Called to copy BFD private section data from one object file to another. */ bfd_boolean (*_bfd_copy_private_section_data) (bfd *, sec_ptr, bfd *, sec_ptr); /* Called to copy BFD private symbol data from one symbol to another. */ bfd_boolean (*_bfd_copy_private_symbol_data) (bfd *, asymbol *, bfd *, asymbol *); /* Called to copy BFD private header data from one object file to another. */ bfd_boolean (*_bfd_copy_private_header_data) (bfd *, bfd *); /* Called to set private backend flags. */ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword); /* Called to print private BFD data. */ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *); /* Core file entry points. */ #define BFD_JUMP_TABLE_CORE(NAME) \ NAME##_core_file_failing_command, \ NAME##_core_file_failing_signal, \ NAME##_core_file_matches_executable_p char * (*_core_file_failing_command) (bfd *); int (*_core_file_failing_signal) (bfd *); bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *); /* Archive entry points. */ #define BFD_JUMP_TABLE_ARCHIVE(NAME) \ NAME##_slurp_armap, \ NAME##_slurp_extended_name_table, \ NAME##_construct_extended_name_table, \ NAME##_truncate_arname, \ NAME##_write_armap, \ NAME##_read_ar_hdr, \ NAME##_openr_next_archived_file, \ NAME##_get_elt_at_index, \ NAME##_generic_stat_arch_elt, \ NAME##_update_armap_timestamp bfd_boolean (*_bfd_slurp_armap) (bfd *); bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *); bfd_boolean (*_bfd_construct_extended_name_table) (bfd *, char **, bfd_size_type *, const char **); void (*_bfd_truncate_arname) (bfd *, const char *, char *); bfd_boolean (*write_armap) (bfd *, unsigned int, struct orl *, unsigned int, int); void * (*_bfd_read_ar_hdr_fn) (bfd *); bfd * (*openr_next_archived_file) (bfd *, bfd *); #define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i)) bfd * (*_bfd_get_elt_at_index) (bfd *, symindex); int (*_bfd_stat_arch_elt) (bfd *, struct stat *); bfd_boolean (*_bfd_update_armap_timestamp) (bfd *); /* Entry points used for symbols. 
*/ #define BFD_JUMP_TABLE_SYMBOLS(NAME) \ NAME##_get_symtab_upper_bound, \ NAME##_canonicalize_symtab, \ NAME##_make_empty_symbol, \ NAME##_print_symbol, \ NAME##_get_symbol_info, \ NAME##_bfd_is_local_label_name, \ NAME##_bfd_is_target_special_symbol, \ NAME##_get_lineno, \ NAME##_find_nearest_line, \ _bfd_generic_find_line, \ NAME##_find_inliner_info, \ NAME##_bfd_make_debug_symbol, \ NAME##_read_minisymbols, \ NAME##_minisymbol_to_symbol long (*_bfd_get_symtab_upper_bound) (bfd *); long (*_bfd_canonicalize_symtab) (bfd *, struct bfd_symbol **); struct bfd_symbol * (*_bfd_make_empty_symbol) (bfd *); void (*_bfd_print_symbol) (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type); #define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e)) void (*_bfd_get_symbol_info) (bfd *, struct bfd_symbol *, symbol_info *); #define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e)) bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *); bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *); alent * (*_get_lineno) (bfd *, struct bfd_symbol *); bfd_boolean (*_bfd_find_nearest_line) (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma, const char **, const char **, unsigned int *); bfd_boolean (*_bfd_find_line) (bfd *, struct bfd_symbol **, struct bfd_symbol *, const char **, unsigned int *); bfd_boolean (*_bfd_find_inliner_info) (bfd *, const char **, const char **, unsigned int *); /* Back-door to allow format-aware applications to create debug symbols while using BFD for everything else. Currently used by the assembler when creating COFF files. */ asymbol * (*_bfd_make_debug_symbol) (bfd *, void *, unsigned long size); #define bfd_read_minisymbols(b, d, m, s) \ BFD_SEND (b, _read_minisymbols, (b, d, m, s)) long (*_read_minisymbols) (bfd *, bfd_boolean, void **, unsigned int *); #define bfd_minisymbol_to_symbol(b, d, m, f) \ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f)) asymbol * (*_minisymbol_to_symbol) (bfd *, bfd_boolean, const void *, asymbol *); /* Routines for relocs. */ #define BFD_JUMP_TABLE_RELOCS(NAME) \ NAME##_get_reloc_upper_bound, \ NAME##_canonicalize_reloc, \ NAME##_bfd_reloc_type_lookup long (*_get_reloc_upper_bound) (bfd *, sec_ptr); long (*_bfd_canonicalize_reloc) (bfd *, sec_ptr, arelent **, struct bfd_symbol **); /* See documentation on reloc types. */ reloc_howto_type * (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type); /* Routines used when writing an object file. */ #define BFD_JUMP_TABLE_WRITE(NAME) \ NAME##_set_arch_mach, \ NAME##_set_section_contents bfd_boolean (*_bfd_set_arch_mach) (bfd *, enum bfd_architecture, unsigned long); bfd_boolean (*_bfd_set_section_contents) (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type); /* Routines used by the linker. 
*/ #define BFD_JUMP_TABLE_LINK(NAME) \ NAME##_sizeof_headers, \ NAME##_bfd_get_relocated_section_contents, \ NAME##_bfd_relax_section, \ NAME##_bfd_link_hash_table_create, \ NAME##_bfd_link_hash_table_free, \ NAME##_bfd_link_add_symbols, \ NAME##_bfd_link_just_syms, \ NAME##_bfd_final_link, \ NAME##_bfd_link_split_section, \ NAME##_bfd_gc_sections, \ NAME##_bfd_merge_sections, \ NAME##_bfd_is_group_section, \ NAME##_bfd_discard_group, \ NAME##_section_already_linked \ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean); bfd_byte * (*_bfd_get_relocated_section_contents) (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *, bfd_boolean, struct bfd_symbol **); bfd_boolean (*_bfd_relax_section) (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *); /* Create a hash table for the linker. Different backends store different information in this table. */ struct bfd_link_hash_table * (*_bfd_link_hash_table_create) (bfd *); /* Release the memory associated with the linker hash table. */ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *); /* Add symbols from this object file into the hash table. */ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *); /* Indicate that we are only retrieving symbol values from this section. */ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *); /* Do a link based on the link_order structures attached to each section of the BFD. */ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *); /* Should this section be split up into smaller pieces during linking. */ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *); /* Remove sections that are not referenced from the output. */ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *); /* Attempt to merge SEC_MERGE sections. */ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *); /* Is this section a member of a group? */ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *); /* Discard members of a group. */ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *); /* Check if SEC has been already linked during a reloceatable or final link. */ void (*_section_already_linked) (bfd *, struct bfd_section *); /* Routines to handle dynamic symbols and relocs. */ #define BFD_JUMP_TABLE_DYNAMIC(NAME) \ NAME##_get_dynamic_symtab_upper_bound, \ NAME##_canonicalize_dynamic_symtab, \ NAME##_get_synthetic_symtab, \ NAME##_get_dynamic_reloc_upper_bound, \ NAME##_canonicalize_dynamic_reloc /* Get the amount of memory required to hold the dynamic symbols. */ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *); /* Read in the dynamic symbols. */ long (*_bfd_canonicalize_dynamic_symtab) (bfd *, struct bfd_symbol **); /* Create synthetized symbols. */ long (*_bfd_get_synthetic_symtab) (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **, struct bfd_symbol **); /* Get the amount of memory required to hold the dynamic relocs. */ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *); /* Read in the dynamic relocs. */ long (*_bfd_canonicalize_dynamic_reloc) (bfd *, arelent **, struct bfd_symbol **); /* Opposite endian version of this target. */ const struct bfd_target * alternative_target; /* Data for use by back-end routines, which isn't generic enough to belong in this structure. 
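 *
 * Illustrative note (a sketch of the dispatch mechanism, not additional
 * API): the front-end macros earlier in this header reach these slots
 * through BFD_SEND, which indirects through the target vector attached
 * to the bfd.  A call such as
 *
 *	long n = bfd_canonicalize_symtab (abfd, syms);
 *
 * therefore expands (ignoring the DEBUG_BFD_SEND variant) to roughly
 *
 *	long n = (*abfd->xvec->_bfd_canonicalize_symtab) (abfd, syms);
 *
 * so the behaviour of every generic entry point is chosen by whichever
 * bfd_target the file was opened with.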
*/ const void *backend_data; } bfd_target; bfd_boolean bfd_set_default_target (const char *name); const bfd_target *bfd_find_target (const char *target_name, bfd *abfd); const char ** bfd_target_list (void); const bfd_target *bfd_search_for_target (int (*search_func) (const bfd_target *, void *), void *); /* Extracted from format.c. */ bfd_boolean bfd_check_format (bfd *abfd, bfd_format format); bfd_boolean bfd_check_format_matches (bfd *abfd, bfd_format format, char ***matching); bfd_boolean bfd_set_format (bfd *abfd, bfd_format format); const char *bfd_format_string (bfd_format format); /* Extracted from linker.c. */ bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec); #define bfd_link_split_section(abfd, sec) \ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec)) void bfd_section_already_linked (bfd *abfd, asection *sec); #define bfd_section_already_linked(abfd, sec) \ BFD_SEND (abfd, _section_already_linked, (abfd, sec)) /* Extracted from simple.c. */ bfd_byte *bfd_simple_get_relocated_section_contents (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table); #ifdef __cplusplus } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/bitops.h000066400000000000000000000262561314037446600246110ustar00rootroot00000000000000#ifndef _X86_64_BITOPS_H #define _X86_64_BITOPS_H /* * Copyright 1992, Linus Torvalds. */ #include #ifdef CONFIG_SMP #define LOCK_PREFIX "lock ; " #else #define LOCK_PREFIX "" #endif #define ADDR (*(volatile long *) addr) /** * set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * This function is atomic and may not be reordered. See __set_bit() * if you do not require the atomic guarantees. * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static __inline__ void set_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btsl %1,%0" :"+m" (ADDR) :"dIr" (nr) : "memory"); } /** * __set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * * Unlike set_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ static __inline__ void __set_bit(int nr, volatile void * addr) { __asm__ volatile( "btsl %1,%0" :"+m" (ADDR) :"dIr" (nr) : "memory"); } /** * clear_bit - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * clear_bit() is atomic and may not be reordered. However, it does * not contain a memory barrier, so if it is used for locking purposes, * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. */ static __inline__ void clear_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btrl %1,%0" :"+m" (ADDR) :"dIr" (nr)); } static __inline__ void __clear_bit(int nr, volatile void * addr) { __asm__ __volatile__( "btrl %1,%0" :"+m" (ADDR) :"dIr" (nr)); } #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() /** * __change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * * Unlike change_bit(), this function is non-atomic and may be reordered. * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. 
*/ static __inline__ void __change_bit(int nr, volatile void * addr) { __asm__ __volatile__( "btcl %1,%0" :"+m" (ADDR) :"dIr" (nr)); } /** * change_bit - Toggle a bit in memory * @nr: Bit to change * @addr: Address to start counting from * * change_bit() is atomic and may not be reordered. * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ static __inline__ void change_bit(int nr, volatile void * addr) { __asm__ __volatile__( LOCK_PREFIX "btcl %1,%0" :"+m" (ADDR) :"dIr" (nr)); } /** * test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ static __inline__ int test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr) : "memory"); return oldbit; } /** * __test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static __inline__ int __test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__( "btsl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr)); return oldbit; } /** * test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. */ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr) : "memory"); return oldbit; } /** * __test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__( "btrl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr)); return oldbit; } /* WARNING: non atomic and it can be reordered! */ static __inline__ int __test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__( "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr) : "memory"); return oldbit; } /** * test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is atomic and cannot be reordered. * It also implies a memory barrier. 
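 *
 * Illustrative usage of the atomic helpers above (the flag word, bit
 * name and error code are hypothetical, not taken from this header):
 *
 *	static unsigned long device_flags;
 *	#define DEV_BUSY 0
 *
 *	if (test_and_set_bit(DEV_BUSY, &device_flags))
 *		return -EBUSY;
 *	do_work();
 *	smp_mb__before_clear_bit();
 *	clear_bit(DEV_BUSY, &device_flags);
 *
 * test_and_set_bit() returns the previous bit value, so a non-zero
 * result means another CPU already owned the flag; clear_bit() releases
 * it, with the explicit barrier beforehand because clear_bit() itself
 * is not a memory barrier (see the comment on clear_bit() above).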
*/ static __inline__ int test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__( LOCK_PREFIX "btcl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit),"+m" (ADDR) :"dIr" (nr) : "memory"); return oldbit; } #if 0 /* Fool kernel-doc since it doesn't do macros yet */ /** * test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ static int test_bit(int nr, const volatile void * addr); #endif static __inline__ int constant_test_bit(int nr, const volatile void * addr) { return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; } static __inline__ int variable_test_bit(int nr, volatile const void * addr) { int oldbit; __asm__ __volatile__( "btl %2,%1\n\tsbbl %0,%0" :"=r" (oldbit) :"m" (ADDR),"dIr" (nr)); return oldbit; } #define test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ constant_test_bit((nr),(addr)) : \ variable_test_bit((nr),(addr))) #undef ADDR extern long find_first_zero_bit(const unsigned long * addr, unsigned long size); extern long find_next_zero_bit (const unsigned long * addr, long size, long offset); extern long find_first_bit(const unsigned long * addr, unsigned long size); extern long find_next_bit(const unsigned long * addr, long size, long offset); /* return index of first bet set in val or max when no bit is set */ static inline unsigned long __scanbit(unsigned long val, unsigned long max) { asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); return val; } #define find_first_bit(addr,size) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ (__scanbit(*(unsigned long *)addr,(size))) : \ find_first_bit(addr,size))) #define find_next_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ find_next_bit(addr,size,off))) #define find_first_zero_bit(addr,size) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ (__scanbit(~*(unsigned long *)addr,(size))) : \ find_first_zero_bit(addr,size))) #define find_next_zero_bit(addr,size,off) \ ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ find_next_zero_bit(addr,size,off))) /* * Find string of zero bits in a bitmap. -1 when not found. */ extern unsigned long find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len); static inline void set_bit_string(unsigned long *bitmap, unsigned long i, int len) { unsigned long end = i + len; while (i < end) { __set_bit(i, bitmap); i++; } } static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, int len) { unsigned long end = i + len; while (i < end) { __clear_bit(i, bitmap); i++; } } /** * ffz - find first zero in word. * @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. */ static __inline__ unsigned long ffz(unsigned long word) { __asm__("bsfq %1,%0" :"=r" (word) :"r" (~word)); return word; } /** * __ffs - find first bit in word. * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ static __inline__ unsigned long __ffs(unsigned long word) { __asm__("bsfq %1,%0" :"=r" (word) :"rm" (word)); return word; } /* * __fls: find last bit set. * @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. 
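 *
 * Illustrative values for the bit-scan helpers in this header, using
 * the hypothetical word 0x58 (bits 3, 4 and 6 set):
 *
 *	__ffs(0x58UL) == 3	lowest set bit
 *	__fls(0x58UL) == 6	highest set bit
 *	ffz(0x58UL)   == 0	lowest clear bit
 *	ffs(0x58)     == 4	1-based variant defined below, 0 if no bit set
 *	fls(0x58)     == 7	1-based variant defined below, 0 if no bit set
 *
 * The double-underscore forms are undefined for an argument with no
 * suitable bit (all zeroes, or all ones in the case of ffz), so callers
 * test for that case first.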
*/ static __inline__ unsigned long __fls(unsigned long word) { __asm__("bsrq %1,%0" :"=r" (word) :"rm" (word)); return word; } #ifdef __KERNEL__ static inline int sched_find_first_bit(const unsigned long *b) { if (b[0]) return __ffs(b[0]); if (b[1]) return __ffs(b[1]) + 64; return __ffs(b[2]) + 128; } /** * ffs - find first bit set * @x: the word to search * * This is defined the same way as * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ static __inline__ int ffs(int x) { int r; __asm__("bsfl %1,%0\n\t" "cmovzl %2,%0" : "=r" (r) : "rm" (x), "r" (-1)); return r+1; } /** * fls64 - find last bit set in 64 bit word * @x: the word to search * * This is defined the same way as fls. */ static __inline__ int fls64(__u64 x) { if (x == 0) return 0; return __fls(x) + 1; } /** * fls - find last bit set * @x: the word to search * * This is defined the same way as ffs. */ static __inline__ int fls(int x) { int r; __asm__("bsrl %1,%0\n\t" "cmovzl %2,%0" : "=&r" (r) : "rm" (x), "rm" (-1)); return r+1; } /** * hweightN - returns the hamming weight of a N-bit word * @x: the word to weigh * * The Hamming Weight of a number is the total number of bits set in it. */ #define hweight64(x) generic_hweight64(x) #define hweight32(x) generic_hweight32(x) #define hweight16(x) generic_hweight16(x) #define hweight8(x) generic_hweight8(x) #endif /* __KERNEL__ */ #ifdef __KERNEL__ #define ext2_set_bit(nr,addr) \ __test_and_set_bit((nr),(unsigned long*)addr) #define ext2_set_bit_atomic(lock,nr,addr) \ test_and_set_bit((nr),(unsigned long*)addr) #define ext2_clear_bit(nr, addr) \ __test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_clear_bit_atomic(lock,nr,addr) \ test_and_clear_bit((nr),(unsigned long*)addr) #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) #define ext2_find_first_zero_bit(addr, size) \ find_first_zero_bit((unsigned long*)addr, size) #define ext2_find_next_zero_bit(addr, size, off) \ find_next_zero_bit((unsigned long*)addr, size, off) /* Bitmap functions for the minix filesystem. */ #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) #define minix_find_first_zero_bit(addr,size) \ find_first_zero_bit((void*)addr,size) #endif /* __KERNEL__ */ #endif /* _X86_64_BITOPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/boot.h000066400000000000000000000006311314037446600242410ustar00rootroot00000000000000#ifndef _LINUX_BOOT_H #define _LINUX_BOOT_H /* Don't touch these, unless you really know what you're doing. 
*/ #define DEF_INITSEG 0x9000 #define DEF_SYSSEG 0x1000 #define DEF_SETUPSEG 0x9020 #define DEF_SYSSIZE 0x7F00 /* Internal svga startup constants */ #define NORMAL_VGA 0xffff /* 80x25 mode */ #define EXTENDED_VGA 0xfffe /* 80x50 mode */ #define ASK_VGA 0xfffd /* ask for it at bootup */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/bootsetup.h000066400000000000000000000032321314037446600253220ustar00rootroot00000000000000 #ifndef _X86_64_BOOTSETUP_H #define _X86_64_BOOTSETUP_H 1 #define BOOT_PARAM_SIZE 4096 extern char x86_boot_params[BOOT_PARAM_SIZE]; /* * This is set up by the setup-routine at boot-time */ #define PARAM ((unsigned char *)x86_boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned int *) (PARAM+0x214)) #define INITRD_START (*(unsigned int *) (PARAM+0x218)) #define INITRD_SIZE (*(unsigned int *) (PARAM+0x21c)) #define EDID_INFO (*(struct edid_info *) (PARAM+0x140)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #define COMMAND_LINE boot_command_line #define RAMDISK_IMAGE_START_MASK 0x07FF #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/bug.h000066400000000000000000000015171314037446600240570ustar00rootroot00000000000000#ifndef __ASM_X8664_BUG_H #define __ASM_X8664_BUG_H 1 #include /* * Tell the user there is some problem. The exception handler decodes * this frame. */ struct bug_frame { unsigned char ud2[2]; unsigned char push; signed int filename; unsigned char ret; unsigned short line; } __attribute__((packed)); #ifdef CONFIG_BUG #define HAVE_ARCH_BUG /* We turn the bug frame into valid instructions to not confuse the disassembler. Thanks to Jan Beulich & Suresh Siddha for nice instruction selection. The magic numbers generate mov $64bitimm,%eax ; ret $offset. */ #define BUG() \ asm volatile( \ "ud2 ; pushq $%c1 ; ret $%c0" :: \ "i"(__LINE__), "i" (__FILE__)) void out_of_line_bug(void); #else static inline void out_of_line_bug(void) { } #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/bugs.h000066400000000000000000000010771314037446600242430ustar00rootroot00000000000000/* * include/asm-x86_64/bugs.h * * Copyright (C) 1994 Linus Torvalds * Copyright (C) 2000 SuSE * * This is included by init/main.c to check for architecture-dependent bugs. 
* * Needs: * void check_bugs(void); */ #include #include #include #include #include extern void alternative_instructions(void); static void __init check_bugs(void) { identify_cpu(&boot_cpu_data); #if !defined(CONFIG_SMP) printk("CPU: "); print_cpu_info(&boot_cpu_data); #endif alternative_instructions(); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/byteorder.h000066400000000000000000000013261314037446600252770ustar00rootroot00000000000000#ifndef _X86_64_BYTEORDER_H #define _X86_64_BYTEORDER_H #include #include #ifdef __GNUC__ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) { __asm__("bswapq %0" : "=r" (x) : "0" (x)); return x; } static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) { __asm__("bswapl %0" : "=r" (x) : "0" (x)); return x; } /* Do not define swab16. Gcc is smart enough to recognize "C" version and convert it into rotation or exhange. */ #define __arch__swab32(x) ___arch__swab32(x) #define __arch__swab64(x) ___arch__swab64(x) #endif /* __GNUC__ */ #define __BYTEORDER_HAS_U64__ #include #endif /* _X86_64_BYTEORDER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/cache.h000066400000000000000000000010541314037446600243410ustar00rootroot00000000000000/* * include/asm-x8664/cache.h */ #ifndef __ARCH_X8664_CACHE_H #define __ARCH_X8664_CACHE_H #include /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #ifdef CONFIG_X86_VSMP /* vSMP Internode cacheline shift */ #define INTERNODE_CACHE_SHIFT (12) #ifdef CONFIG_SMP #define __cacheline_aligned_in_smp \ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \ __attribute__((__section__(".data.page_aligned"))) #endif #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/cacheflush.h000066400000000000000000000023771314037446600254140ustar00rootroot00000000000000#ifndef _X8664_CACHEFLUSH_H #define _X8664_CACHEFLUSH_H /* Keep includes the same across arches. */ #include /* Caches aren't brain-dead on the intel. 
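 *
 * Illustrative sketch for the page-attribute helpers declared at the
 * end of this header (the address and the PAGE_KERNEL_RO protection are
 * hypothetical): change_page_attr() only rewrites the kernel mappings,
 * so it is followed by global_flush_tlb() to make the change visible:
 *
 *	change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 *	global_flush_tlb();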
*/ #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_range(start, end) do { } while (0) #define flush_icache_page(vma,pg) do { } while (0) #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ memcpy(dst, src, len) void global_flush_tlb(void); int change_page_attr(struct page *page, int numpages, pgprot_t prot); int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); #endif #endif /* _X8664_CACHEFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/calgary.h000066400000000000000000000041721314037446600247240ustar00rootroot00000000000000/* * Derived from include/asm-powerpc/iommu.h * * Copyright (C) IBM Corporation, 2006 * * Author: Jon Mason * Author: Muli Ben-Yehuda * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_X86_64_CALGARY_H #define _ASM_X86_64_CALGARY_H #include #include #include #include struct iommu_table { unsigned long it_base; /* mapped address of tce table */ unsigned long it_hint; /* Hint for next alloc */ unsigned long *it_map; /* A simple allocation bitmap for now */ void __iomem *bbar; /* Bridge BAR */ u64 tar_val; /* Table Address Register */ struct timer_list watchdog_timer; spinlock_t it_lock; /* Protects it_map */ unsigned int it_size; /* Size of iommu table in entries */ unsigned char it_busno; /* Bus number this table belongs to */ }; #define TCE_TABLE_SIZE_UNSPECIFIED ~0 #define TCE_TABLE_SIZE_64K 0 #define TCE_TABLE_SIZE_128K 1 #define TCE_TABLE_SIZE_256K 2 #define TCE_TABLE_SIZE_512K 3 #define TCE_TABLE_SIZE_1M 4 #define TCE_TABLE_SIZE_2M 5 #define TCE_TABLE_SIZE_4M 6 #define TCE_TABLE_SIZE_8M 7 extern int use_calgary; #ifdef CONFIG_CALGARY_IOMMU extern int calgary_iommu_init(void); extern void detect_calgary(void); #else static inline int calgary_iommu_init(void) { return 1; } static inline void detect_calgary(void) { return; } #endif #endif /* _ASM_X86_64_CALGARY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/calling.h000066400000000000000000000060031314037446600247060ustar00rootroot00000000000000/* * Some macros to handle stack frames in assembly. 
*/ #include #define R15 0 #define R14 8 #define R13 16 #define R12 24 #define RBP 32 #define RBX 40 /* arguments: interrupts/non tracing syscalls only save upto here*/ #define R11 48 #define R10 56 #define R9 64 #define R8 72 #define RAX 80 #define RCX 88 #define RDX 96 #define RSI 104 #define RDI 112 #define ORIG_RAX 120 /* + error_code */ /* end of arguments */ /* cpu exception frame or undefined in case of fast syscall. */ #define RIP 128 #define CS 136 #define EFLAGS 144 #define RSP 152 #define SS 160 #define ARGOFFSET R11 #define SWFRAME ORIG_RAX .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0 subq $9*8+\addskip,%rsp CFI_ADJUST_CFA_OFFSET 9*8+\addskip movq %rdi,8*8(%rsp) CFI_REL_OFFSET rdi,8*8 movq %rsi,7*8(%rsp) CFI_REL_OFFSET rsi,7*8 movq %rdx,6*8(%rsp) CFI_REL_OFFSET rdx,6*8 .if \norcx .else movq %rcx,5*8(%rsp) CFI_REL_OFFSET rcx,5*8 .endif movq %rax,4*8(%rsp) CFI_REL_OFFSET rax,4*8 .if \nor891011 .else movq %r8,3*8(%rsp) CFI_REL_OFFSET r8,3*8 movq %r9,2*8(%rsp) CFI_REL_OFFSET r9,2*8 movq %r10,1*8(%rsp) CFI_REL_OFFSET r10,1*8 movq %r11,(%rsp) CFI_REL_OFFSET r11,0*8 .endif .endm #define ARG_SKIP 9*8 .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0 .if \skipr11 .else movq (%rsp),%r11 CFI_RESTORE r11 .endif .if \skipr8910 .else movq 1*8(%rsp),%r10 CFI_RESTORE r10 movq 2*8(%rsp),%r9 CFI_RESTORE r9 movq 3*8(%rsp),%r8 CFI_RESTORE r8 .endif .if \skiprax .else movq 4*8(%rsp),%rax CFI_RESTORE rax .endif .if \skiprcx .else movq 5*8(%rsp),%rcx CFI_RESTORE rcx .endif .if \skiprdx .else movq 6*8(%rsp),%rdx CFI_RESTORE rdx .endif movq 7*8(%rsp),%rsi CFI_RESTORE rsi movq 8*8(%rsp),%rdi CFI_RESTORE rdi .if ARG_SKIP+\addskip > 0 addq $ARG_SKIP+\addskip,%rsp CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) .endif .endm .macro LOAD_ARGS offset movq \offset(%rsp),%r11 movq \offset+8(%rsp),%r10 movq \offset+16(%rsp),%r9 movq \offset+24(%rsp),%r8 movq \offset+40(%rsp),%rcx movq \offset+48(%rsp),%rdx movq \offset+56(%rsp),%rsi movq \offset+64(%rsp),%rdi movq \offset+72(%rsp),%rax .endm #define REST_SKIP 6*8 .macro SAVE_REST subq $REST_SKIP,%rsp CFI_ADJUST_CFA_OFFSET REST_SKIP movq %rbx,5*8(%rsp) CFI_REL_OFFSET rbx,5*8 movq %rbp,4*8(%rsp) CFI_REL_OFFSET rbp,4*8 movq %r12,3*8(%rsp) CFI_REL_OFFSET r12,3*8 movq %r13,2*8(%rsp) CFI_REL_OFFSET r13,2*8 movq %r14,1*8(%rsp) CFI_REL_OFFSET r14,1*8 movq %r15,(%rsp) CFI_REL_OFFSET r15,0*8 .endm .macro RESTORE_REST movq (%rsp),%r15 CFI_RESTORE r15 movq 1*8(%rsp),%r14 CFI_RESTORE r14 movq 2*8(%rsp),%r13 CFI_RESTORE r13 movq 3*8(%rsp),%r12 CFI_RESTORE r12 movq 4*8(%rsp),%rbp CFI_RESTORE rbp movq 5*8(%rsp),%rbx CFI_RESTORE rbx addq $REST_SKIP,%rsp CFI_ADJUST_CFA_OFFSET -(REST_SKIP) .endm .macro SAVE_ALL SAVE_ARGS SAVE_REST .endm .macro RESTORE_ALL addskip=0 RESTORE_REST RESTORE_ARGS 0,\addskip .endm .macro icebp .byte 0xf1 .endm UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/checksum.h000066400000000000000000000125321314037446600251030ustar00rootroot00000000000000#ifndef _X86_64_CHECKSUM_H #define _X86_64_CHECKSUM_H /* * Checksums for x86-64 * Copyright 2002 by Andi Kleen, SuSE Labs * with some code from asm-i386/checksum.h */ #include #include #include /** * csum_fold - Fold and invert a 32bit checksum. * sum: 32bit unfolded sum * * Fold a 32bit running checksum to 16bit and invert it. This is usually * the last step before putting a checksum into a packet. * Make sure not to mix with 64bit checksums. 
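 *
 * Illustrative usage together with ip_fast_csum() and csum_partial()
 * declared later in this header (iph, icmph and len are hypothetical;
 * byte-order and struct details are glossed over):
 *
 *	iph->check = 0;
 *	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = csum_fold(csum_partial(icmph, len, 0));
 *
 * ip_fast_csum() already returns a folded, inverted 16bit value, while
 * csum_partial() accumulates a 32bit unfolded sum that is folded once,
 * at the very end, by csum_fold().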
*/ static inline unsigned int csum_fold(unsigned int sum) { __asm__( " addl %1,%0\n" " adcl $0xffff,%0" : "=r" (sum) : "r" (sum << 16), "0" (sum & 0xffff0000) ); return (~sum) >> 16; } /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. * * By Jorge Cwik , adapted for linux by * Arnt Gulbrandsen. */ /** * ip_fast_csum - Compute the IPv4 header checksum efficiently. * iph: ipv4 header * ihl: length of header / 4 */ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) { unsigned int sum; asm( " movl (%1), %0\n" " subl $4, %2\n" " jbe 2f\n" " addl 4(%1), %0\n" " adcl 8(%1), %0\n" " adcl 12(%1), %0\n" "1: adcl 16(%1), %0\n" " lea 4(%1), %1\n" " decl %2\n" " jne 1b\n" " adcl $0, %0\n" " movl %0, %2\n" " shrl $16, %0\n" " addw %w2, %w0\n" " adcl $0, %0\n" " notl %0\n" "2:" /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ : "=r" (sum), "=r" (iph), "=r" (ihl) : "1" (iph), "2" (ihl) : "memory"); return(sum); } /** * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet * @sum: initial sum to be added in (32bit unfolded) * * Returns the pseudo header checksum the input data. Result is * 32bit unfolded. */ static inline unsigned long csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len, unsigned short proto, unsigned int sum) { asm(" addl %1, %0\n" " adcl %2, %0\n" " adcl %3, %0\n" " adcl $0, %0\n" : "=r" (sum) : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum)); return sum; } /** * csum_tcpup_magic - Compute an IPv4 pseudo header checksum. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: ip protocol of packet * @sum: initial sum to be added in (32bit unfolded) * * Returns the 16bit pseudo header checksum the input data already * complemented and ready to be filled in. */ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, unsigned short proto, unsigned int sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } /** * csum_partial - Compute an internet checksum. * @buff: buffer to be checksummed * @len: length of buffer. * @sum: initial sum to be added in (32bit unfolded) * * Returns the 32bit unfolded internet checksum of the buffer. * Before filling it in it needs to be csum_fold()'ed. * buff should be aligned to a 64bit boundary if possible. */ extern unsigned int csum_partial(const unsigned char *buff, unsigned len, unsigned int sum); #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 #define HAVE_CSUM_COPY_USER 1 /* Do not call this directly. Use the wrappers below */ extern unsigned long csum_partial_copy_generic(const unsigned char *src, const unsigned char *dst, unsigned len, unsigned sum, int *src_err_ptr, int *dst_err_ptr); extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len, unsigned int isum, int *errp); extern unsigned int csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len, unsigned int isum, int *errp); extern unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum); /* Old names. To be removed. 
*/ #define csum_and_copy_to_user csum_partial_copy_to_user #define csum_and_copy_from_user csum_partial_copy_from_user /** * ip_compute_csum - Compute an 16bit IP checksum. * @buff: buffer address. * @len: length of buffer. * * Returns the 16bit folded/inverted checksum of the passed buffer. * Ready to fill in. */ extern unsigned short ip_compute_csum(unsigned char * buff, int len); /** * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header. * @saddr: source address * @daddr: destination address * @len: length of packet * @proto: protocol of packet * @sum: initial sum (32bit unfolded) to be added in * * Computes an IPv6 pseudo header checksum. This sum is added the checksum * into UDP/TCP packets and contains some link layer information. * Returns the unfolded 32bit checksum. */ struct in6_addr; #define _HAVE_ARCH_IPV6_CSUM 1 extern unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len, unsigned short proto, unsigned int sum); static inline unsigned add32_with_carry(unsigned a, unsigned b) { asm("addl %2,%0\n\t" "adcl $0,%0" : "=r" (a) : "0" (a), "r" (b)); return a; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/compat.h000066400000000000000000000107501314037446600245640ustar00rootroot00000000000000#ifndef _ASM_X86_64_COMPAT_H #define _ASM_X86_64_COMPAT_H /* * Architecture specific compatibility types */ #include #include #define COMPAT_USER_HZ 100 typedef u32 compat_size_t; typedef s32 compat_ssize_t; typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; typedef u32 compat_ino_t; typedef u16 compat_dev_t; typedef s32 compat_off_t; typedef s64 compat_loff_t; typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; typedef s32 compat_timer_t; typedef s32 compat_key_t; typedef s32 compat_int_t; typedef s32 compat_long_t; typedef u32 compat_uint_t; typedef u32 compat_ulong_t; struct compat_timespec { compat_time_t tv_sec; s32 tv_nsec; }; struct compat_timeval { compat_time_t tv_sec; s32 tv_usec; }; struct compat_stat { compat_dev_t st_dev; u16 __pad1; compat_ino_t st_ino; compat_mode_t st_mode; compat_nlink_t st_nlink; __compat_uid_t st_uid; __compat_gid_t st_gid; compat_dev_t st_rdev; u16 __pad2; u32 st_size; u32 st_blksize; u32 st_blocks; u32 st_atime; u32 st_atime_nsec; u32 st_mtime; u32 st_mtime_nsec; u32 st_ctime; u32 st_ctime_nsec; u32 __unused4; u32 __unused5; }; struct compat_flock { short l_type; short l_whence; compat_off_t l_start; compat_off_t l_len; compat_pid_t l_pid; }; #define F_GETLK64 12 /* using 'struct flock64' */ #define F_SETLK64 13 #define F_SETLKW64 14 /* * IA32 uses 4 byte alignment for 64 bit quantities, * so we need to pack this structure. */ struct compat_flock64 { short l_type; short l_whence; compat_loff_t l_start; compat_loff_t l_len; compat_pid_t l_pid; } __attribute__((packed)); struct compat_statfs { int f_type; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; compat_fsid_t f_fsid; int f_namelen; /* SunOS ignores this field. 
*/ int f_frsize; int f_spare[5]; }; #define COMPAT_RLIM_OLD_INFINITY 0x7fffffff #define COMPAT_RLIM_INFINITY 0xffffffff typedef u32 compat_old_sigset_t; /* at least 32 bits */ #define _COMPAT_NSIG 64 #define _COMPAT_NSIG_BPW 32 typedef u32 compat_sigset_word; #define COMPAT_OFF_T_MAX 0x7fffffff #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; __compat_uid32_t uid; __compat_gid32_t gid; __compat_uid32_t cuid; __compat_gid32_t cgid; unsigned short mode; unsigned short __pad1; unsigned short seq; unsigned short __pad2; compat_ulong_t unused1; compat_ulong_t unused2; }; struct compat_semid64_ds { struct compat_ipc64_perm sem_perm; compat_time_t sem_otime; compat_ulong_t __unused1; compat_time_t sem_ctime; compat_ulong_t __unused2; compat_ulong_t sem_nsems; compat_ulong_t __unused3; compat_ulong_t __unused4; }; struct compat_msqid64_ds { struct compat_ipc64_perm msg_perm; compat_time_t msg_stime; compat_ulong_t __unused1; compat_time_t msg_rtime; compat_ulong_t __unused2; compat_time_t msg_ctime; compat_ulong_t __unused3; compat_ulong_t msg_cbytes; compat_ulong_t msg_qnum; compat_ulong_t msg_qbytes; compat_pid_t msg_lspid; compat_pid_t msg_lrpid; compat_ulong_t __unused4; compat_ulong_t __unused5; }; struct compat_shmid64_ds { struct compat_ipc64_perm shm_perm; compat_size_t shm_segsz; compat_time_t shm_atime; compat_ulong_t __unused1; compat_time_t shm_dtime; compat_ulong_t __unused2; compat_time_t shm_ctime; compat_ulong_t __unused3; compat_pid_t shm_cpid; compat_pid_t shm_lpid; compat_ulong_t shm_nattch; compat_ulong_t __unused4; compat_ulong_t __unused5; }; /* * A pointer passed in from user mode. This should not * be used for syscall parameters, just declare them * as pointers because the syscall entry code will have * appropriately comverted them already. 
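 *
 * Illustrative sketch using compat_ptr() defined below (the foo32
 * structure, kbuf and arg names are hypothetical): a 32bit user
 * structure that embeds a pointer carries it as a compat_uptr_t, and
 * the handler widens it before dereferencing:
 *
 *	struct foo32 {
 *		compat_uptr_t buf;
 *		compat_size_t len;
 *	} arg32;
 *
 *	if (copy_from_user(&arg32, (void __user *)arg, sizeof(arg32)))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, compat_ptr(arg32.buf), arg32.len))
 *		return -EFAULT;
 *
 * compat_alloc_user_space() below is the matching helper for building a
 * native-layout copy on the user stack when a 64bit handler is reused.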
*/ typedef u32 compat_uptr_t; static inline void __user *compat_ptr(compat_uptr_t uptr) { return (void __user *)(unsigned long)uptr; } static inline compat_uptr_t ptr_to_compat(void __user *uptr) { return (u32)(unsigned long)uptr; } static __inline__ void __user *compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->rsp - len; } static inline int is_compat_task(void) { return current_thread_info()->status & TS_COMPAT; } #endif /* _ASM_X86_64_COMPAT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/cpu.h000066400000000000000000000000321314037446600240600ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/cpufeature.h000066400000000000000000000136321314037446600254460ustar00rootroot00000000000000/* * cpufeature.h * * Defines x86 CPU feature bits */ #ifndef __ASM_X8664_CPUFEATURE_H #define __ASM_X8664_CPUFEATURE_H #define NCAPINTS 7 /* N 32-bit words worth of info */ /* Intel-defined CPU features, CPUID level 0x00000001, word 0 */ #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ /* of FPU context), and CR4.OSFXSR available */ #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSR optimizations */ #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
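 *
 * Illustrative note: each X86_FEATURE_* value encodes (capability word
 * times 32, plus bit), so X86_FEATURE_LM == 1*32+29 names bit 29 of
 * word 1 (the CPUID 0x80000001 leaf), and the cpu_has()/boot_cpu_has()
 * macros below reduce to a bitmap test, e.g.
 *
 *	if (boot_cpu_has(X86_FEATURE_LM))
 *		printk("long mode supported\n");
 *
 * is effectively test_bit(1*32+29, boot_cpu_data.x86_capability).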
*/ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ #define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */ #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ #define X86_FEATURE_MFENCE_RDTSC (3*32+6) /* Mfence synchronizes RDTSC */ #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ #define X86_FEATURE_LFENCE_RDTSC (3*32+8) /* Lfence synchronizes RDTSC */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_CID (4*32+10) /* Context ID */ #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ /* * Auxiliary flags: Linux defined - For features scattered in various * CPUID levels like 0x6, 0xA etc */ #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) #define cpu_has_fpu 1 #define cpu_has_vme 0 #define cpu_has_de 1 #define cpu_has_pse 1 #define cpu_has_tsc 1 #define cpu_has_pae ___BUG___ #define cpu_has_pge 1 #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) #define cpu_has_mtrr 1 #define cpu_has_mmx 1 #define cpu_has_fxsr 1 #define cpu_has_xmm 1 #define cpu_has_xmm2 1 #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) #define cpu_has_mp 1 /* XXX */ #define cpu_has_k6_mtrr 0 #define cpu_has_cyrix_arr 0 #define cpu_has_centaur_mcr 0 #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) #endif /* __ASM_X8664_CPUFEATURE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/cputime.h000066400000000000000000000001711314037446600247430ustar00rootroot00000000000000#ifndef __X86_64_CPUTIME_H #define __X86_64_CPUTIME_H #include #endif /* __X86_64_CPUTIME_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/current.h000066400000000000000000000006621314037446600247640ustar00rootroot00000000000000#ifndef _X86_64_CURRENT_H #define 
_X86_64_CURRENT_H #if !defined(__ASSEMBLY__) struct task_struct; #include static inline struct task_struct *get_current(void) { struct task_struct *t = read_pda(pcurrent); return t; } #define current get_current() #else #ifndef ASM_OFFSET_H #include #endif #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg #endif #endif /* !(_X86_64_CURRENT_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/debugreg.h000066400000000000000000000053411314037446600250650ustar00rootroot00000000000000#ifndef _X86_64_DEBUGREG_H #define _X86_64_DEBUGREG_H /* Indicate the register numbers for a number of the specific debug registers. Registers 0-3 contain the addresses we wish to trap on */ #define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */ #define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */ #define DR_STATUS 6 /* u_debugreg[DR_STATUS] */ #define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */ /* Define a few things for the status register. We can use this to determine which debugging register was responsible for the trap. The other bits are either reserved or not of interest to us. */ #define DR_TRAP0 (0x1) /* db0 */ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ #define DR_TRAP3 (0x8) /* db3 */ #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ /* Now define a bunch of things for manipulating the control register. The top two bytes of the control register consist of 4 fields of 4 bits - each field corresponds to one of the four debug registers, and indicates what types of access we trap on, and how large the data field is that we are looking at */ #define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */ #define DR_CONTROL_SIZE 4 /* 4 control bits per register */ #define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */ #define DR_RW_WRITE (0x1) #define DR_RW_READ (0x3) #define DR_LEN_1 (0x0) /* Settings for data length to trap on */ #define DR_LEN_2 (0x4) #define DR_LEN_4 (0xC) #define DR_LEN_8 (0x8) /* The low byte to the control register determine which registers are enabled. There are 4 fields of two bits. One bit is "local", meaning that the processor will reset the bit after a task switch and the other is global meaning that we have to explicitly reset the bit. With linux, you can use either one, since we explicitly zero the register when we enter kernel mode. */ #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ #define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */ /* The second byte to the control register has a few special things. We can slow the instruction pipeline for instructions coming via the gdt or the ldt if we want to. 
I am not sure why this is an advantage */ #define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */ #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/delay.h000066400000000000000000000013051314037446600243730ustar00rootroot00000000000000#ifndef _X8664_DELAY_H #define _X8664_DELAY_H /* * Copyright (C) 1993 Linus Torvalds * * Delay routines calling functions in arch/x86_64/lib/delay.c */ extern void __bad_udelay(void); extern void __bad_ndelay(void); extern void __udelay(unsigned long usecs); extern void __ndelay(unsigned long usecs); extern void __const_udelay(unsigned long usecs); extern void __delay(unsigned long loops); #define udelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ __udelay(n)) #define ndelay(n) (__builtin_constant_p(n) ? \ ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ __ndelay(n)) #endif /* defined(_X8664_DELAY_H) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/desc.h000066400000000000000000000134721314037446600242230ustar00rootroot00000000000000/* Written 2000 by Andi Kleen */ #ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #ifndef __ASSEMBLY__ #include #include #include #include // 8 byte segment descriptor struct desc_struct { u16 limit0; u16 base0; unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; } __attribute__((packed)); struct n_desc_struct { unsigned int a,b; }; extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; enum { GATE_INTERRUPT = 0xE, GATE_TRAP = 0xF, GATE_CALL = 0xC, }; // 16byte gate struct gate_struct { u16 offset_low; u16 segment; unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; u16 offset_middle; u32 offset_high; u32 zero1; } __attribute__((packed)); #define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) #define PTR_HIGH(x) ((unsigned long)(x) >> 32) enum { DESC_TSS = 0x9, DESC_LDT = 0x2, }; // LDT or TSS descriptor in the GDT. 16 bytes. struct ldttss_desc { u16 limit0; u16 base0; unsigned base1 : 8, type : 5, dpl : 2, p : 1; unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; u32 base3; u32 zero1; } __attribute__((packed)); struct desc_ptr { unsigned short size; unsigned long address; } __attribute__((packed)) ; #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) #define clear_LDT() asm volatile("lldt %w0"::"r" (0)) /* * This is the ldt that every process will get unless we need * something other than this. 
*/ extern struct desc_struct default_ldt[]; extern struct gate_struct idt_table[]; extern struct desc_ptr cpu_gdt_descr[]; /* the cpu gdt accessor */ #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) { struct gate_struct s; s.offset_low = PTR_LOW(func); s.segment = __KERNEL_CS; s.ist = ist; s.p = 1; s.dpl = dpl; s.zero0 = 0; s.zero1 = 0; s.type = type; s.offset_middle = PTR_MIDDLE(func); s.offset_high = PTR_HIGH(func); /* does not need to be atomic because it is only done once at setup time */ memcpy(adr, &s, 16); } static inline void set_intr_gate(int nr, void *func) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); } static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); } static inline void set_system_gate(int nr, void *func) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); } static inline void set_system_gate_ist(int nr, void *func, unsigned ist) { _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); } static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, unsigned size) { struct ldttss_desc d; memset(&d,0,sizeof(d)); d.limit0 = size & 0xFFFF; d.base0 = PTR_LOW(tss); d.base1 = PTR_MIDDLE(tss) & 0xFF; d.type = type; d.p = 1; d.limit1 = (size >> 16) & 0xF; d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; d.base3 = PTR_HIGH(tss); memcpy(ptr, &d, 16); } static inline void set_tss_desc(unsigned cpu, void *addr) { /* * sizeof(unsigned long) coming from an extra "long" at the end * of the iobitmap. See tss_struct definition in processor.h * * -1? seg base+limit should be pointing to the address of the * last valid byte */ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], (unsigned long)addr, DESC_TSS, IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); } static inline void set_ldt_desc(unsigned cpu, void *addr, int size) { set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr, DESC_LDT, size * 8 - 1); } static inline void set_seg_base(unsigned cpu, int entry, void *base) { struct desc_struct *d = &cpu_gdt(cpu)[entry]; u32 addr = (u32)(u64)base; BUG_ON((u64)base >> 32); d->base0 = addr & 0xffff; d->base1 = (addr >> 16) & 0xff; d->base2 = (addr >> 24) & 0xff; } #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) /* Don't allow setting of the lm bit. It is useless anyways because 64bit system calls require __USER_CS. */ #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ /* ((info)->lm << 21) | */ \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 && \ (info)->lm == 0) #if TLS_SIZE != 24 # error update this code. 
#endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN); gdt[0] = t->tls_array[0]; gdt[1] = t->tls_array[1]; gdt[2] = t->tls_array[2]; } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock (mm_context_t *pc, int cpu) { int count = pc->size; if (likely(!count)) { clear_LDT(); return; } set_ldt_desc(cpu, pc->ldt, count); load_LDT_desc(); } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } extern struct desc_ptr idt_descr; #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/div64.h000066400000000000000000000000371314037446600242320ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/dma-mapping.h000066400000000000000000000131351314037446600254730ustar00rootroot00000000000000#ifndef _X8664_DMA_MAPPING_H #define _X8664_DMA_MAPPING_H 1 /* * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for * documentation. */ #include #include #include #include struct dma_mapping_ops { int (*mapping_error)(dma_addr_t dma_addr); void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); dma_addr_t (*map_single)(struct device *hwdev, void *ptr, size_t size, int direction); /* like map_single, but doesn't check the device mask */ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr, size_t size, int direction); void (*unmap_single)(struct device *dev, dma_addr_t addr, size_t size, int direction); void (*sync_single_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction); void (*sync_single_for_device)(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction); void (*sync_single_range_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction); void (*sync_single_range_for_device)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction); void (*sync_sg_for_cpu)(struct device *hwdev, struct scatterlist *sg, int nelems, int direction); void (*sync_sg_for_device)(struct device *hwdev, struct scatterlist *sg, int nelems, int direction); int (*map_sg)(struct device *hwdev, struct scatterlist *sg, int nents, int direction); void (*unmap_sg)(struct device *hwdev, struct scatterlist *sg, int nents, int direction); int (*dma_supported)(struct device *hwdev, u64 mask); int is_phys; }; extern dma_addr_t bad_dma_address; extern struct dma_mapping_ops* dma_ops; extern int iommu_merge; static inline int dma_mapping_error(dma_addr_t dma_addr) { if (dma_ops->mapping_error) return dma_ops->mapping_error(dma_addr); return (dma_addr == bad_dma_address); } extern void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { return dma_ops->map_single(hwdev, ptr, size, direction); } static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, int direction) { dma_ops->unmap_single(dev, addr, size, direction); } #define dma_map_page(dev,page,offset,size,dir) \ dma_map_single((dev), page_address(page)+(offset), (size), (dir)) #define dma_unmap_page 
dma_unmap_single static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { if (dma_ops->sync_single_for_cpu) dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { if (dma_ops->sync_single_for_device) dma_ops->sync_single_for_device(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { if (dma_ops->sync_single_range_for_cpu) { dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); } flush_write_buffers(); } static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { if (dma_ops->sync_single_range_for_device) dma_ops->sync_single_range_for_device(hwdev, dma_handle, offset, size, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { if (dma_ops->sync_sg_for_cpu) dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { if (dma_ops->sync_sg_for_device) { dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); } flush_write_buffers(); } static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { return dma_ops->map_sg(hwdev, sg, nents, direction); } static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { dma_ops->unmap_sg(hwdev, sg, nents, direction); } extern int dma_supported(struct device *hwdev, u64 mask); /* same for gart, swiotlb, and nommu */ static inline int dma_get_cache_alignment(void) { return boot_cpu_data.x86_clflush_size; } #define dma_is_consistent(h) 1 extern int dma_set_mask(struct device *dev, u64 mask); static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { flush_write_buffers(); } extern struct device fallback_dev; extern int panic_on_overflow; #endif /* _X8664_DMA_MAPPING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/dma.h000066400000000000000000000234171314037446600240460ustar00rootroot00000000000000/* $Id: dma.h,v 1.1.1.1 2001/04/19 20:00:38 ak Exp $ * linux/include/asm/dma.h: Defines for using and allocating dma channels. * Written by Hennus Bergman, 1992. * High DMA channel support & info by Hannu Savolainen * and John Boyd, Nov. 1992. */ #ifndef _ASM_DMA_H #define _ASM_DMA_H #include #include /* And spinlocks */ #include /* need byte IO */ #include #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER #define dma_outb outb_p #else #define dma_outb outb #endif #define dma_inb inb /* * NOTES about DMA transfers: * * controller 1: channels 0-3, byte operations, ports 00-1F * controller 2: channels 4-7, word operations, ports C0-DF * * - ALL registers are 8 bits only, regardless of transfer size * - channel 4 is not used - cascades 1 into 2. 
* - channels 0-3 are byte - addresses/counts are for physical bytes * - channels 5-7 are word - addresses/counts are for physical words * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries * - transfer count loaded to registers is 1 less than actual count * - controller 2 offsets are all even (2x offsets for controller 1) * - page registers for 5-7 don't use data bit 0, represent 128K pages * - page registers for 0-3 use bit 0, represent 64K pages * * DMA transfers are limited to the lower 16MB of _physical_ memory. * Note that addresses loaded into registers must be _physical_ addresses, * not logical addresses (which may differ if paging is active). * * Address mapping for channels 0-3: * * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) * | ... | | ... | | ... | * | ... | | ... | | ... | * | ... | | ... | | ... | * P7 ... P0 A7 ... A0 A7 ... A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Address mapping for channels 5-7: * * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) * | ... | \ \ ... \ \ \ ... \ \ * | ... | \ \ ... \ \ \ ... \ (not used) * | ... | \ \ ... \ \ \ ... \ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 * | Page | Addr MSB | Addr LSB | (DMA registers) * * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at * the hardware level, so odd-byte transfers aren't possible). * * Transfer count (_not # bytes_) is limited to 64K, represented as actual * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, * and up to 128K bytes may be transferred on channels 5-7 in one operation. * */ #define MAX_DMA_CHANNELS 8 /* 16MB ISA DMA zone */ #define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ #define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) /* Compat define for old dma zone */ #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) /* 8237 DMA controllers */ #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ /* DMA controller registers */ #define DMA1_CMD_REG 0x08 /* command register (w) */ #define DMA1_STAT_REG 0x08 /* status register (r) */ #define DMA1_REQ_REG 0x09 /* request register (w) */ #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ #define DMA1_MODE_REG 0x0B /* mode register (w) */ #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ #define DMA2_CMD_REG 0xD0 /* command register (w) */ #define DMA2_STAT_REG 0xD0 /* status register (r) */ #define DMA2_REQ_REG 0xD2 /* request register (w) */ #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ #define DMA2_MODE_REG 0xD6 /* mode register (w) */ #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ #define DMA_ADDR_0 0x00 /* DMA address registers */ #define DMA_ADDR_1 0x02 #define DMA_ADDR_2 0x04 #define DMA_ADDR_3 0x06 #define DMA_ADDR_4 0xC0 #define DMA_ADDR_5 0xC4 #define DMA_ADDR_6 0xC8 #define DMA_ADDR_7 0xCC #define DMA_CNT_0 0x01 /* DMA count registers */ 
#define DMA_CNT_1 0x03 #define DMA_CNT_2 0x05 #define DMA_CNT_3 0x07 #define DMA_CNT_4 0xC2 #define DMA_CNT_5 0xC6 #define DMA_CNT_6 0xCA #define DMA_CNT_7 0xCE #define DMA_PAGE_0 0x87 /* DMA page registers */ #define DMA_PAGE_1 0x83 #define DMA_PAGE_2 0x81 #define DMA_PAGE_3 0x82 #define DMA_PAGE_5 0x8B #define DMA_PAGE_6 0x89 #define DMA_PAGE_7 0x8A #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ #define DMA_AUTOINIT 0x10 extern spinlock_t dma_spin_lock; static __inline__ unsigned long claim_dma_lock(void) { unsigned long flags; spin_lock_irqsave(&dma_spin_lock, flags); return flags; } static __inline__ void release_dma_lock(unsigned long flags) { spin_unlock_irqrestore(&dma_spin_lock, flags); } /* enable/disable a specific DMA channel */ static __inline__ void enable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr, DMA1_MASK_REG); else dma_outb(dmanr & 3, DMA2_MASK_REG); } static __inline__ void disable_dma(unsigned int dmanr) { if (dmanr<=3) dma_outb(dmanr | 4, DMA1_MASK_REG); else dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); } /* Clear the 'DMA Pointer Flip Flop'. * Write 0 for LSB/MSB, 1 for MSB/LSB access. * Use this once to initialize the FF to a known state. * After that, keep track of it. :-) * --- In order to do that, the DMA routines below should --- * --- only be used while holding the DMA lock ! --- */ static __inline__ void clear_dma_ff(unsigned int dmanr) { if (dmanr<=3) dma_outb(0, DMA1_CLEAR_FF_REG); else dma_outb(0, DMA2_CLEAR_FF_REG); } /* set mode (above) for a specific DMA channel */ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) { if (dmanr<=3) dma_outb(mode | dmanr, DMA1_MODE_REG); else dma_outb(mode | (dmanr&3), DMA2_MODE_REG); } /* Set only the page register bits of the transfer address. * This is used for successive transfers when we know the contents of * the lower 16 bits of the DMA current address register, but a 64k boundary * may have been crossed. */ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) { switch(dmanr) { case 0: dma_outb(pagenr, DMA_PAGE_0); break; case 1: dma_outb(pagenr, DMA_PAGE_1); break; case 2: dma_outb(pagenr, DMA_PAGE_2); break; case 3: dma_outb(pagenr, DMA_PAGE_3); break; case 5: dma_outb(pagenr & 0xfe, DMA_PAGE_5); break; case 6: dma_outb(pagenr & 0xfe, DMA_PAGE_6); break; case 7: dma_outb(pagenr & 0xfe, DMA_PAGE_7); break; } } /* Set transfer address & page bits for specific DMA channel. * Assumes dma flipflop is clear. */ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) { set_dma_page(dmanr, a>>16); if (dmanr <= 3) { dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); } else { dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); } } /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for * a specific DMA channel. * You must ensure the parameters are valid. * NOTE: from a manual: "the number of transfers is one more * than the initial word count"! This is taken into account. * Assumes dma flip-flop is clear. * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. 
*/ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) { count--; if (dmanr <= 3) { dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); } else { dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); } } /* Get DMA residue count. After a DMA transfer, this * should return zero. Reading this while a DMA transfer is * still in progress will return unpredictable results. * If called before the channel has been used, it may return 1. * Otherwise, it returns the number of _bytes_ left to transfer. * * Assumes DMA flip-flop is clear. */ static __inline__ int get_dma_residue(unsigned int dmanr) { unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; /* using short to get 16-bit wrap around */ unsigned short count; count = 1 + dma_inb(io_port); count += dma_inb(io_port) << 8; return (dmanr<=3)? count : (count<<1); } /* These are in kernel/dma.c: */ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ extern void free_dma(unsigned int dmanr); /* release it again */ /* From PCI */ #ifdef CONFIG_PCI extern int isa_dma_bridge_buggy; #else #define isa_dma_bridge_buggy (0) #endif #endif /* _ASM_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/dmi.h000066400000000000000000000010031314037446600240410ustar00rootroot00000000000000#ifndef _ASM_DMI_H #define _ASM_DMI_H 1 #include #define DMI_MAX_DATA 2048 extern int dmi_alloc_index; extern char dmi_alloc_data[DMI_MAX_DATA]; /* This is so early that there is no good way to allocate dynamic memory. Allocate data in an BSS array. */ static inline void *dmi_alloc(unsigned len) { int idx = dmi_alloc_index; if ((dmi_alloc_index += len) > DMI_MAX_DATA) return NULL; return dmi_alloc_data + idx; } #define dmi_ioremap early_ioremap #define dmi_iounmap early_iounmap #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/dump.h000066400000000000000000000060701314037446600242460ustar00rootroot00000000000000/* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * x86_64 lkcd port Sachin Sant ( sachinp@in.ibm.com) * This code is released under version 2 of the GNU GPL. */ /* This header file holds the architecture specific crash dump header */ #ifndef _ASM_DUMP_H #define _ASM_DUMP_H /* necessary header files */ #include /* for pt_regs */ #include /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */ #define platform_timestamp(x) rdtscll(x) /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ struct __dump_header_asm { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; uint64_t dha_stack[NR_CPUS]; uint64_t dha_stack_ptr[NR_CPUS]; } __attribute__((packed)); #ifdef __KERNEL__ static inline void get_current_regs(struct pt_regs *regs) { unsigned seg; __asm__ __volatile__("movq %%r15,%0" : "=m"(regs->r15)); __asm__ __volatile__("movq %%r14,%0" : "=m"(regs->r14)); __asm__ __volatile__("movq %%r13,%0" : "=m"(regs->r13)); __asm__ __volatile__("movq %%r12,%0" : "=m"(regs->r12)); __asm__ __volatile__("movq %%r11,%0" : "=m"(regs->r11)); __asm__ __volatile__("movq %%r10,%0" : "=m"(regs->r10)); __asm__ __volatile__("movq %%r9,%0" : "=m"(regs->r9)); __asm__ __volatile__("movq %%r8,%0" : "=m"(regs->r8)); __asm__ __volatile__("movq %%rbx,%0" : "=m"(regs->rbx)); __asm__ __volatile__("movq %%rcx,%0" : "=m"(regs->rcx)); __asm__ __volatile__("movq %%rdx,%0" : "=m"(regs->rdx)); __asm__ __volatile__("movq %%rsi,%0" : "=m"(regs->rsi)); __asm__ __volatile__("movq %%rdi,%0" : "=m"(regs->rdi)); __asm__ __volatile__("movq %%rbp,%0" : "=m"(regs->rbp)); __asm__ __volatile__("movq %%rax,%0" : "=m"(regs->rax)); __asm__ __volatile__("movq %%rsp,%0" : "=m"(regs->rsp)); __asm__ __volatile__("movl %%ss, %0" :"=r"(seg)); regs->ss = (unsigned long)seg; __asm__ __volatile__("movl %%cs, %0" :"=r"(seg)); regs->cs = (unsigned long)seg; __asm__ __volatile__("pushfq; popq %0" :"=m"(regs->eflags)); regs->rip = (unsigned long)current_text_addr(); } extern volatile int dump_in_progress; extern struct __dump_header_asm dump_header_asm; #ifdef CONFIG_SMP extern void dump_send_ipi(void); #else #define dump_send_ipi() do { } while(0) #endif #endif /* __KERNEL__ */ #endif /* _ASM_DUMP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/dwarf2.h000066400000000000000000000025121314037446600244630ustar00rootroot00000000000000#ifndef _DWARF2_H #define _DWARF2_H 1 #include #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif /* Macros for dwarf2 CFI unwind table entries. See "as.info" for details on these pseudo ops. Unfortunately they are only supported in very new binutils, so define them away for older version. */ #ifdef CONFIG_UNWIND_INFO #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc #define CFI_DEF_CFA .cfi_def_cfa #define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register #define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset #define CFI_OFFSET .cfi_offset #define CFI_REL_OFFSET .cfi_rel_offset #define CFI_REGISTER .cfi_register #define CFI_RESTORE .cfi_restore #define CFI_REMEMBER_STATE .cfi_remember_state #define CFI_RESTORE_STATE .cfi_restore_state #define CFI_UNDEFINED .cfi_undefined #else /* use assembler line comment character # to ignore the arguments. 
*/ #define CFI_STARTPROC # #define CFI_ENDPROC # #define CFI_DEF_CFA # #define CFI_DEF_CFA_REGISTER # #define CFI_DEF_CFA_OFFSET # #define CFI_ADJUST_CFA_OFFSET # #define CFI_OFFSET # #define CFI_REL_OFFSET # #define CFI_REGISTER # #define CFI_RESTORE # #define CFI_REMEMBER_STATE # #define CFI_RESTORE_STATE # #define CFI_UNDEFINED # #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/e820.h000066400000000000000000000040531314037446600237560ustar00rootroot00000000000000/* * structures and definitions for the int 15, ax=e820 memory map * scheme. * * In a nutshell, setup.S populates a scratch table in the * empty_zero_block that contains a list of usable address/size * duples. setup.c, this information is transferred into the e820map, * and in init.c/numa.c, that new information is used to mark pages * reserved or not. */ #ifndef __E820_HEADER #define __E820_HEADER #include #define E820MAP 0x2d0 /* our map */ #define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 #define E820_RESERVED 2 #define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ #define E820_NVS 4 #define HIGH_MEMORY (1024*1024) #define LOWMEMSIZE() (0x9f000) #ifndef __ASSEMBLY__ struct e820entry { u64 addr; /* start of memory segment */ u64 size; /* size of memory segment */ u32 type; /* type of memory segment */ } __attribute__((packed)); struct e820map { int nr_map; struct e820entry map[E820MAX]; }; extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned size); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void setup_memory_region(void); extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); extern void e820_reserve_resources(void); extern void e820_mark_nosave_regions(void); extern void e820_print_map(char *who); extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end); extern void e820_setup_gap(void); extern unsigned long e820_hole_size(unsigned long start_pfn, unsigned long end_pfn); extern void __init parse_memopt(char *p, char **end); extern void __init parse_memmapopt(char *p, char **end); extern struct e820map e820; extern unsigned ebda_addr, ebda_size; #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/edac.h000066400000000000000000000006461314037446600242000ustar00rootroot00000000000000#ifndef ASM_EDAC_H #define ASM_EDAC_H /* ECC atomic, DMA, SMP and interrupt safe scrub function */ static __inline__ void atomic_scrub(void *va, u32 size) { unsigned int *virt_addr = va; u32 i; for (i = 0; i < size / 4; i++, virt_addr++) /* Very carefully read and write to memory atomically * so we are interrupt, DMA and SMP safe. */ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/elf.h000066400000000000000000000131361314037446600240500ustar00rootroot00000000000000#ifndef __ASM_X86_64_ELF_H #define __ASM_X86_64_ELF_H /* * ELF register definitions.. 
*/ #include #include #include #include /* x86-64 relocation types */ #define R_X86_64_NONE 0 /* No reloc */ #define R_X86_64_64 1 /* Direct 64 bit */ #define R_X86_64_PC32 2 /* PC relative 32 bit signed */ #define R_X86_64_GOT32 3 /* 32 bit GOT entry */ #define R_X86_64_PLT32 4 /* 32 bit PLT address */ #define R_X86_64_COPY 5 /* Copy symbol at runtime */ #define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ #define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ #define R_X86_64_RELATIVE 8 /* Adjust by program base */ #define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */ #define R_X86_64_32 10 /* Direct 32 bit zero extended */ #define R_X86_64_32S 11 /* Direct 32 bit sign extended */ #define R_X86_64_16 12 /* Direct 16 bit zero extended */ #define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ #define R_X86_64_8 14 /* Direct 8 bit sign extended */ #define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ #define R_X86_64_NUM 16 typedef unsigned long elf_greg_t; #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; /* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) /* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS64 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_X86_64 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have been loaded before the code runs. A value of 0 tells we have no such handler. We might as well make sure everything else is cleared too (except for %esp), just to make things more deterministic. */ #define ELF_PLAT_INIT(_r, load_addr) do { \ struct task_struct *cur = current; \ (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \ (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \ (_r)->rax = 0; \ (_r)->r8 = 0; \ (_r)->r9 = 0; \ (_r)->r10 = 0; \ (_r)->r11 = 0; \ (_r)->r12 = 0; \ (_r)->r13 = 0; \ (_r)->r14 = 0; \ (_r)->r15 = 0; \ cur->thread.fs = 0; cur->thread.gs = 0; \ cur->thread.fsindex = 0; cur->thread.gsindex = 0; \ cur->thread.ds = 0; cur->thread.es = 0; \ clear_thread_flag(TIF_IA32); \ } while (0) #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different). Assumes current is the process getting dumped. 
*/ #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ unsigned v; \ (pr_reg)[0] = (regs)->r15; \ (pr_reg)[1] = (regs)->r14; \ (pr_reg)[2] = (regs)->r13; \ (pr_reg)[3] = (regs)->r12; \ (pr_reg)[4] = (regs)->rbp; \ (pr_reg)[5] = (regs)->rbx; \ (pr_reg)[6] = (regs)->r11; \ (pr_reg)[7] = (regs)->r10; \ (pr_reg)[8] = (regs)->r9; \ (pr_reg)[9] = (regs)->r8; \ (pr_reg)[10] = (regs)->rax; \ (pr_reg)[11] = (regs)->rcx; \ (pr_reg)[12] = (regs)->rdx; \ (pr_reg)[13] = (regs)->rsi; \ (pr_reg)[14] = (regs)->rdi; \ (pr_reg)[15] = (regs)->orig_rax; \ (pr_reg)[16] = (regs)->rip; \ (pr_reg)[17] = (regs)->cs; \ (pr_reg)[18] = (regs)->eflags; \ (pr_reg)[19] = (regs)->rsp; \ (pr_reg)[20] = (regs)->ss; \ (pr_reg)[21] = current->thread.fs; \ (pr_reg)[22] = current->thread.gs; \ asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \ asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ } while(0); /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in intent than poking at uname or /proc/cpuinfo. For the moment, we have only optimizations for the Intel generations, but that could change... */ /* I'm not sure if we can use '-' here */ #define ELF_PLATFORM ("x86_64") #ifdef __KERNEL__ extern void set_personality_64bit(void); #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit() /* * An executable for which elf_read_implies_exec() returns TRUE will * have the READ_IMPLIES_EXEC personality flag set automatically. */ #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) struct task_struct; extern int dump_task_regs (struct task_struct *, elf_gregset_t *); extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) /* 1GB for 64bit, 8MB for 32bit */ #define STACK_RND_MASK (is_compat_task() ? 0x7ff : 0x3fffff) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/emergency-restart.h000066400000000000000000000002271314037446600267370ustar00rootroot00000000000000#ifndef _ASM_EMERGENCY_RESTART_H #define _ASM_EMERGENCY_RESTART_H extern void machine_emergency_restart(void); #endif /* _ASM_EMERGENCY_RESTART_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/errno.h000066400000000000000000000001261314037446600244220ustar00rootroot00000000000000#ifndef _X8664_ERRNO_H #define _X8664_ERRNO_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/fcntl.h000066400000000000000000000000371314037446600244040ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/fixmap.h000066400000000000000000000055211314037446600245650ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. 
* * Copyright (C) 1998 Ingo Molnar */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H #include #include #include #include #include #include /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. */ enum fixed_addresses { VSYSCALL_LAST_PAGE, VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, FIX_HPET_BASE, #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif __end_of_fixed_addresses }; extern void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) /* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without translation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. */ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/floppy.h000066400000000000000000000150621314037446600246130ustar00rootroot00000000000000/* * Architecture specific parts of the Floppy driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 */ #ifndef __ASM_X86_64_FLOPPY_H #define __ASM_X86_64_FLOPPY_H #include /* * The DMA channel used by the floppy controller cannot access data at * addresses >= 16MB * * Went back to the 1MB limit, as some people had problems with the floppy * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. 
*/ #define _CROSS_64KB(a,s,vdma) \ (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) #define SW fd_routine[use_virtual_dma&1] #define CSW fd_routine[can_use_virtual_dma & 1] #define fd_inb(port) inb_p(port) #define fd_outb(value,port) outb_p(value,port) #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) #define FLOPPY_CAN_FALLBACK_ON_NODMA static int virtual_dma_count; static int virtual_dma_residue; static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) { register unsigned char st; #undef TRACE_FLPY_INT #ifdef TRACE_FLPY_INT static int calls=0; static int bytes=0; static int dma_wait=0; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id, regs); #ifdef TRACE_FLPY_INT if(!calls) bytes = virtual_dma_count; #endif { register int lcount; register char *lptr; st = 1; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { st=inb(virtual_dma_port+4) & 0xa0 ; if(st != 0xa0) break; if(virtual_dma_mode) outb_p(*lptr, virtual_dma_port+5); else *lptr = inb_p(virtual_dma_port+5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; st = inb(virtual_dma_port+4); } #ifdef TRACE_FLPY_INT calls++; #endif if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; dma_wait=0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id, regs); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT if(!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; } static void fd_disable_dma(void) { if(! 
(can_use_virtual_dma & 1)) disable_dma(FLOPPY_DMA); doing_pdma = 0; virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; } static int vdma_request_dma(unsigned int dmanr, const char * device_id) { return 0; } static void vdma_nop(unsigned int dummy) { } static int vdma_get_dma_residue(unsigned int dummy) { return virtual_dma_count + virtual_dma_residue; } static int fd_request_irq(void) { if(can_use_virtual_dma) return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, "floppy", NULL); else return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT|SA_SAMPLE_RANDOM, "floppy", NULL); } static unsigned long dma_mem_alloc(unsigned long size) { return __get_dma_pages(GFP_KERNEL,get_order(size)); } static unsigned long vdma_mem_alloc(unsigned long size) { return (unsigned long) vmalloc(size); } #define nodma_mem_alloc(size) vdma_mem_alloc(size) static void _fd_dma_mem_free(unsigned long addr, unsigned long size) { if((unsigned long) addr >= (unsigned long) high_memory) vfree((void *)addr); else free_pages(addr, get_order(size)); } #define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) static void _fd_chose_dma_mode(char *addr, unsigned long size) { if(can_use_virtual_dma == 2) { if((unsigned long) addr >= (unsigned long) high_memory || isa_virt_to_bus(addr) >= 0x1000000 || _CROSS_64KB(addr, size, 0)) use_virtual_dma = 1; else use_virtual_dma = 0; } else { use_virtual_dma = can_use_virtual_dma & 1; } } #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; return 0; } static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) { #ifdef FLOPPY_SANITY_CHECK if (CROSS_64KB(addr, size)) { printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); return -1; } #endif /* actual, physical DMA */ doing_pdma = 0; clear_dma_ff(FLOPPY_DMA); set_dma_mode(FLOPPY_DMA,mode); set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); set_dma_count(FLOPPY_DMA,size); enable_dma(FLOPPY_DMA); return 0; } static struct fd_routine_l { int (*_request_dma)(unsigned int dmanr, const char * device_id); void (*_free_dma)(unsigned int dmanr); int (*_get_dma_residue)(unsigned int dummy); unsigned long (*_dma_mem_alloc) (unsigned long size); int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); } fd_routine[] = { { request_dma, free_dma, get_dma_residue, dma_mem_alloc, hard_dma_setup }, { vdma_request_dma, vdma_nop, vdma_get_dma_residue, vdma_mem_alloc, vdma_dma_setup } }; static int FDC1 = 0x3f0; static int FDC2 = -1; /* * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
*/ #define FLOPPY0_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = (CMOS_READ(0x10) >> 4) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define FLOPPY1_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = CMOS_READ(0x10) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define N_FDC 2 #define N_DRIVE 8 #define FLOPPY_MOTOR_MASK 0xf0 #define AUTO_DMA #define EXTRA_FLOPPY_PARAMS #endif /* __ASM_X86_64_FLOPPY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/fpu32.h000066400000000000000000000004271314037446600242400ustar00rootroot00000000000000#ifndef _FPU32_H #define _FPU32_H 1 struct _fpstate_ia32; int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave); int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, struct pt_regs *regs, int fsave); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/futex.h000066400000000000000000000045611314037446600244370ustar00rootroot00000000000000#ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #ifdef __KERNEL__ #include #include #include #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: " insn "\n" \ "2: .section .fixup,\"ax\"\n\ 3: mov %3, %1\n\ jmp 2b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .quad 1b,3b\n\ .previous" \ : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ "1: movl %2, %0\n\ movl %0, %3\n" \ insn "\n" \ "2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ jnz 1b\n\ 3: .section .fixup,\"ax\"\n\ 4: mov %5, %1\n\ jmp 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 8\n\ .quad 1b,4b,2b,4b\n\ .previous" \ : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; inc_preempt_count(); switch (op) { case FUTEX_OP_SET: __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ANDN: __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); break; case FUTEX_OP_XOR: __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); break; default: ret = -ENOSYS; } dec_preempt_count(); if (!ret) { switch (cmp) { case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; default: ret = -ENOSYS; } } return ret; } #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/gart-mapping.h000066400000000000000000000005221314037446600256630ustar00rootroot00000000000000#ifndef _X8664_GART_MAPPING_H #define _X8664_GART_MAPPING_H 1 #include #include struct device; extern void* gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); extern int gart_dma_supported(struct device *hwdev, u64 mask); #endif /* _X8664_GART_MAPPING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/genapic.h000066400000000000000000000015161314037446600247070ustar00rootroot00000000000000#ifndef _ASM_GENAPIC_H #define _ASM_GENAPIC_H 1 /* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 * * Generic APIC sub-arch data struct. * * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. 
*/ struct genapic { char *name; u32 int_delivery_mode; u32 int_dest_mode; u32 int_delivery_dest; /* for quick IPIs */ int (*apic_id_registered)(void); cpumask_t (*target_cpus)(void); void (*init_apic_ldr)(void); /* ipi */ void (*send_IPI_mask)(cpumask_t mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); /* */ unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); unsigned int (*phys_pkg_id)(int index_msb); }; extern struct genapic *genapic; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/hardirq.h000066400000000000000000000007601314037446600247330ustar00rootroot00000000000000#ifndef __ASM_HARDIRQ_H #define __ASM_HARDIRQ_H #include #include #include #include #include #define __ARCH_IRQ_STAT 1 #define local_softirq_pending() read_pda(__softirq_pending) #define __ARCH_SET_SOFTIRQ_PENDING 1 #define set_softirq_pending(x) write_pda(__softirq_pending, (x)) #define or_softirq_pending(x) or_pda(__softirq_pending, (x)) extern void ack_bad_irq(unsigned int irq); #endif /* __ASM_HARDIRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/hpet.h000066400000000000000000000040241314037446600242360ustar00rootroot00000000000000#ifndef _ASM_X8664_HPET_H #define _ASM_X8664_HPET_H 1 /* * Documentation on HPET can be found at: * http://www.intel.com/ial/home/sp/pcmmspec.htm * ftp://download.intel.com/ial/home/sp/mmts098.pdf */ #define HPET_MMAP_SIZE 1024 #define HPET_ID 0x000 #define HPET_PERIOD 0x004 #define HPET_CFG 0x010 #define HPET_STATUS 0x020 #define HPET_COUNTER 0x0f0 #define HPET_Tn_OFFSET 0x20 #define HPET_Tn_CFG(n) (0x100 + (n) * HPET_Tn_OFFSET) #define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET) #define HPET_Tn_CMP(n) (0x108 + (n) * HPET_Tn_OFFSET) #define HPET_T0_CFG HPET_Tn_CFG(0) #define HPET_T0_CMP HPET_Tn_CMP(0) #define HPET_T1_CFG HPET_Tn_CFG(1) #define HPET_T1_CMP HPET_Tn_CMP(1) #define HPET_ID_VENDOR 0xffff0000 #define HPET_ID_LEGSUP 0x00008000 #define HPET_ID_64BIT 0x00002000 #define HPET_ID_NUMBER 0x00001f00 #define HPET_ID_REV 0x000000ff #define HPET_ID_NUMBER_SHIFT 8 #define HPET_ID_VENDOR_SHIFT 16 #define HPET_ID_VENDOR_8086 0x8086 #define HPET_CFG_ENABLE 0x001 #define HPET_CFG_LEGACY 0x002 #define HPET_LEGACY_8254 2 #define HPET_LEGACY_RTC 8 #define HPET_TN_LEVEL 0x0002 #define HPET_TN_ENABLE 0x0004 #define HPET_TN_PERIODIC 0x0008 #define HPET_TN_PERIODIC_CAP 0x0010 #define HPET_TN_64BIT_CAP 0x0020 #define HPET_TN_SETVAL 0x0040 #define HPET_TN_32BIT 0x0100 #define HPET_TN_ROUTE 0x3e00 #define HPET_TN_FSB 0x4000 #define HPET_TN_FSB_CAP 0x8000 #define HPET_TN_ROUTE_SHIFT 9 #define HPET_TICK_RATE (HZ * 100000UL) extern int is_hpet_enabled(void); extern int is_hpet_legacy_int_enabled(void); extern int hpet_rtc_timer_init(void); extern int oem_force_hpet_timer(void); extern int hpet_use_timer; #ifdef CONFIG_HPET_EMULATE_RTC extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); extern int hpet_set_periodic_freq(unsigned long freq); extern int hpet_rtc_dropped_irq(void); extern int hpet_rtc_timer_init(void); #endif /* CONFIG_HPET_EMULATE_RTC */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/hw_irq.h000066400000000000000000000075011314037446600245720ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old 
arch/i386/kernel/irq.h to here. VY * * IRQ/IPI changes taken from work by Thomas Radke * * * hacked by Andi Kleen for x86-64. * * $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $ */ #ifndef __ASSEMBLY__ #include #include #include #include #include struct hw_interrupt_type; #endif #define NMI_VECTOR 0x02 /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define IA32_SYSCALL_VECTOR 0x80 #define KDBENTER_VECTOR 0x81 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ /* FIXME: LKCD We are out of free vectors 0xf0-0xf9, was using 0xf9 * for now and because I don't know any better I'm going to * hijack a ISA vector. */ #define DUMP_VECTOR 0x20 /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. */ #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define RESCHEDULE_VECTOR 0xfd #define CALL_FUNCTION_VECTOR 0xfc #define KDB_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xfa #define THRESHOLD_APIC_VECTOR 0xf9 /* f8 free */ #define INVALIDATE_TLB_VECTOR_END 0xf7 #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ #define NUM_INVALIDATE_TLB_VECTORS 8 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */ #ifndef __ASSEMBLY__ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern void disable_8259A_irq(unsigned int irq); extern void enable_8259A_irq(unsigned int irq); extern int i8259A_irq_pending(unsigned int irq); extern void make_8259A_irq(unsigned int irq); extern void init_8259A(int aeoi); extern void FASTCALL(send_IPI_self(int vector)); extern void init_VISWS_APIC_irqs(void); extern void setup_IO_APIC(void); extern void disable_IO_APIC(void); extern void print_IO_APIC(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); extern void send_IPI(int dest, int vector); extern void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #define __STR(x) #x #define STR(x) __STR(x) #include #define IRQ_NAME2(nr) nr##_interrupt(void) #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) /* * SMP has a few special interrupts for IPI messages */ #define BUILD_IRQ(nr) \ asmlinkage void IRQ_NAME(nr); \ __asm__( \ "\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ "push $" #nr "-256 ; " \ "jmp common_interrupt"); #if defined(CONFIG_X86_IO_APIC) static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { if (IO_APIC_IRQ(i)) send_IPI_self(IO_APIC_VECTOR(i)); } #else static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif #define platform_legacy_irq(irq) ((irq) < 16) #endif #endif /* _ASM_HW_IRQ_H */ 
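/*
 * A minimal sketch of what the BUILD_IRQ() helper above generates, using
 * vector 0x31 (FIRST_DEVICE_VECTOR) purely as an example:
 *
 *	BUILD_IRQ(0x31)
 *
 * declares
 *
 *	asmlinkage void IRQ0x31_interrupt(void);
 *
 * and emits an assembly stub equivalent to
 *
 *	IRQ0x31_interrupt:
 *		push	$0x31-256
 *		jmp	common_interrupt
 *
 * so every per-vector stub pushes (vector - 256) and funnels into the common
 * interrupt entry path; hw_resend_irq() above simply self-IPIs on the vector
 * the IO-APIC assigned to the IRQ when IO_APIC_IRQ(irq) is true.
 */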
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/i387.h000066400000000000000000000135061314037446600237750ustar00rootroot00000000000000/* * include/asm-x86_64/i387.h * * Copyright (C) 1994 Linus Torvalds * * Pentium III FXSR, SSE support * General FPU state handling cleanups * Gareth Hughes , May 2000 * x86-64 work by Andi Kleen 2002 */ #ifndef __ASM_X86_64_I387_H #define __ASM_X86_64_I387_H #include #include #include #include #include #include extern void fpu_init(void); extern unsigned int mxcsr_feature_mask; extern void mxcsr_feature_mask_init(void); extern void init_fpu(struct task_struct *child); extern int save_i387(struct _fpstate __user *buf); /* * FPU lazy state save handling... */ #define unlazy_fpu(tsk) do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) \ save_init_fpu(tsk); \ } while (0) /* Ignore delayed exceptions from user space */ static inline void tolerant_fwait(void) { asm volatile("1: fwait\n" "2:\n" " .section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,2b\n" " .previous\n"); } #define clear_fpu(tsk) do { \ if (task_thread_info(tsk)->status & TS_USEDFPU) { \ tolerant_fwait(); \ task_thread_info(tsk)->status &= ~TS_USEDFPU; \ stts(); \ } \ } while (0) /* * ptrace request handers... */ extern int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk); extern int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf); /* * i387 state interaction */ #define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr) #define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd) #define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd) #define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd) #define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val)) #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val)) #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val)) #define X87_FSW_ES (1 << 7) /* Exception Summary */ /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. The kernel data segment can be sometimes 0 and sometimes new user value. Both should be ok. Use the PDA as safe address because it should be already in L1. */ static inline void clear_fpu_state(struct i387_fxsave_struct *fx) { if (unlikely(fx->swd & X87_FSW_ES)) asm volatile("fnclex"); alternative_input(ASM_NOP8 ASM_NOP2, " emms\n" /* clear stack tags */ " fildl %%gs:0", /* load to clear state */ X86_FEATURE_FXSAVE_LEAK); } static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) { int err; asm volatile("1: rex64/fxrstor (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : [err] "=r" (err) #if 0 /* See comment in __fxsave_clear() below. */ : [fx] "r" (fx), "m" (*fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); #endif if (unlikely(err)) init_fpu(current); return err; } static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) { int err; asm volatile("1: rex64/fxsave (%[fx])\n\t" "2:\n" ".section .fixup,\"ax\"\n" "3: movl $-1,%[err]\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : [err] "=r" (err), "=m" (*fx) #if 0 /* See comment in __fxsave_clear() below. 
*/ : [fx] "r" (fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "0" (0)); #endif if (unlikely(err)) __clear_user(fx, sizeof(struct i387_fxsave_struct)); /* No need to clear here because the caller clears USED_MATH */ return err; } static inline void __fxsave_clear(struct task_struct *tsk) { /* Using "rex64; fxsave %0" is broken because, if the memory operand uses any extended registers for addressing, a second REX prefix will be generated (to the assembler, rex64 followed by semicolon is a separate instruction), and hence the 64-bitness is lost. */ #if 0 /* Using "fxsaveq %0" would be the ideal choice, but is only supported starting with gas 2.16. */ __asm__ __volatile__("fxsaveq %0" : "=m" (tsk->thread.i387.fxsave)); #elif 0 /* Using, as a workaround, the properly prefixed form below isn't accepted by any binutils version so far released, complaining that the same type of prefix is used twice if an extended register is needed for addressing (fix submitted to mainline 2005-11-21). */ __asm__ __volatile__("rex64/fxsave %0" : "=m" (tsk->thread.i387.fxsave)); #else /* This, however, we can work around by forcing the compiler to select an addressing mode that doesn't require extended registers. */ __asm__ __volatile__("rex64/fxsave %P2(%1)" : "=m" (tsk->thread.i387.fxsave) : "cdaSDb" (tsk), "i" (offsetof(__typeof__(*tsk), thread.i387.fxsave))); #endif clear_fpu_state(&tsk->thread.i387.fxsave); } static inline void kernel_fpu_begin(void) { struct thread_info *me = current_thread_info(); preempt_disable(); if (me->status & TS_USEDFPU) { __fxsave_clear(me->task); me->status &= ~TS_USEDFPU; return; } clts(); } static inline void kernel_fpu_end(void) { stts(); preempt_enable(); } static inline void __save_init_fpu(struct task_struct *tsk) { __fxsave_clear(tsk); task_thread_info(tsk)->status &= ~TS_USEDFPU; } static inline void save_init_fpu(struct task_struct *tsk) { __save_init_fpu(tsk); stts(); } /* * This restores directly out of user space. Exceptions are handled. */ static inline int restore_i387(struct _fpstate __user *buf) { return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); } #endif /* __ASM_X86_64_I387_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ia32.h000066400000000000000000000076751314037446600240530ustar00rootroot00000000000000#ifndef _ASM_X86_64_IA32_H #define _ASM_X86_64_IA32_H #include #ifdef CONFIG_IA32_EMULATION #include /* * 32 bit structures for IA32 support. */ #include /* signal.h */ struct sigaction32 { unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ unsigned int sa_flags; unsigned int sa_restorer; /* Another 32 bit pointer */ compat_sigset_t sa_mask; /* A 32 bit mask */ }; struct old_sigaction32 { unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ compat_old_sigset_t sa_mask; /* A 32 bit mask */ unsigned int sa_flags; unsigned int sa_restorer; /* Another 32 bit pointer */ }; typedef struct sigaltstack_ia32 { unsigned int ss_sp; int ss_flags; unsigned int ss_size; } stack_ia32_t; struct ucontext_ia32 { unsigned int uc_flags; unsigned int uc_link; stack_ia32_t uc_stack; struct sigcontext_ia32 uc_mcontext; compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; /* This matches struct stat64 in glibc2.2, hence the absolutely * insane amounts of padding around dev_t's. 
*/ struct stat64 { unsigned long long st_dev; unsigned char __pad0[4]; #define STAT64_HAS_BROKEN_ST_INO 1 unsigned int __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned int st_uid; unsigned int st_gid; unsigned long long st_rdev; unsigned char __pad3[4]; long long st_size; unsigned int st_blksize; long long st_blocks;/* Number 512-byte blocks allocated. */ unsigned st_atime; unsigned st_atime_nsec; unsigned st_mtime; unsigned st_mtime_nsec; unsigned st_ctime; unsigned st_ctime_nsec; unsigned long long st_ino; } __attribute__((packed)); typedef struct compat_siginfo{ int si_signo; int si_errno; int si_code; union { int _pad[((128/sizeof(int)) - 3)]; /* kill() */ struct { unsigned int _pid; /* sender's pid */ unsigned int _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { compat_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ compat_sigval_t _sigval; /* same as below */ int _sys_private; /* not to be passed to user */ int _overrun_incr; /* amount to add to overrun */ } _timer; /* POSIX.1b signals */ struct { unsigned int _pid; /* sender's pid */ unsigned int _uid; /* sender's uid */ compat_sigval_t _sigval; } _rt; /* SIGCHLD */ struct { unsigned int _pid; /* which child */ unsigned int _uid; /* sender's uid */ int _status; /* exit code */ compat_clock_t _utime; compat_clock_t _stime; } _sigchld; /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ struct { unsigned int _addr; /* faulting insn/memory ref. */ } _sigfault; /* SIGPOLL */ struct { int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ int _fd; } _sigpoll; } _sifields; } compat_siginfo_t; struct sigframe32 { u32 pretcode; int sig; struct sigcontext_ia32 sc; struct _fpstate_ia32 fpstate; unsigned int extramask[_COMPAT_NSIG_WORDS-1]; }; struct rt_sigframe32 { u32 pretcode; int sig; u32 pinfo; u32 puc; compat_siginfo_t info; struct ucontext_ia32 uc; struct _fpstate_ia32 fpstate; }; struct ustat32 { __u32 f_tfree; compat_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; #define IA32_STACK_TOP IA32_PAGE_OFFSET #ifdef __KERNEL__ struct user_desc; struct siginfo_t; int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info); int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info); int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs); struct linux_binprm; extern int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec_stack); struct mm_struct; extern void ia32_pick_mmap_layout(struct mm_struct *mm); #endif #endif /* !CONFIG_IA32_SUPPORT */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ia32_unistd.h000066400000000000000000000247331314037446600254330ustar00rootroot00000000000000#ifndef _ASM_X86_64_IA32_UNISTD_H_ #define _ASM_X86_64_IA32_UNISTD_H_ /* * This file contains the system call numbers of the ia32 port, * this is for the kernel only. 
*/ #define __NR_ia32_restart_syscall 0 #define __NR_ia32_exit 1 #define __NR_ia32_fork 2 #define __NR_ia32_read 3 #define __NR_ia32_write 4 #define __NR_ia32_open 5 #define __NR_ia32_close 6 #define __NR_ia32_waitpid 7 #define __NR_ia32_creat 8 #define __NR_ia32_link 9 #define __NR_ia32_unlink 10 #define __NR_ia32_execve 11 #define __NR_ia32_chdir 12 #define __NR_ia32_time 13 #define __NR_ia32_mknod 14 #define __NR_ia32_chmod 15 #define __NR_ia32_lchown 16 #define __NR_ia32_break 17 #define __NR_ia32_oldstat 18 #define __NR_ia32_lseek 19 #define __NR_ia32_getpid 20 #define __NR_ia32_mount 21 #define __NR_ia32_umount 22 #define __NR_ia32_setuid 23 #define __NR_ia32_getuid 24 #define __NR_ia32_stime 25 #define __NR_ia32_ptrace 26 #define __NR_ia32_alarm 27 #define __NR_ia32_oldfstat 28 #define __NR_ia32_pause 29 #define __NR_ia32_utime 30 #define __NR_ia32_stty 31 #define __NR_ia32_gtty 32 #define __NR_ia32_access 33 #define __NR_ia32_nice 34 #define __NR_ia32_ftime 35 #define __NR_ia32_sync 36 #define __NR_ia32_kill 37 #define __NR_ia32_rename 38 #define __NR_ia32_mkdir 39 #define __NR_ia32_rmdir 40 #define __NR_ia32_dup 41 #define __NR_ia32_pipe 42 #define __NR_ia32_times 43 #define __NR_ia32_prof 44 #define __NR_ia32_brk 45 #define __NR_ia32_setgid 46 #define __NR_ia32_getgid 47 #define __NR_ia32_signal 48 #define __NR_ia32_geteuid 49 #define __NR_ia32_getegid 50 #define __NR_ia32_acct 51 #define __NR_ia32_umount2 52 #define __NR_ia32_lock 53 #define __NR_ia32_ioctl 54 #define __NR_ia32_fcntl 55 #define __NR_ia32_mpx 56 #define __NR_ia32_setpgid 57 #define __NR_ia32_ulimit 58 #define __NR_ia32_oldolduname 59 #define __NR_ia32_umask 60 #define __NR_ia32_chroot 61 #define __NR_ia32_ustat 62 #define __NR_ia32_dup2 63 #define __NR_ia32_getppid 64 #define __NR_ia32_getpgrp 65 #define __NR_ia32_setsid 66 #define __NR_ia32_sigaction 67 #define __NR_ia32_sgetmask 68 #define __NR_ia32_ssetmask 69 #define __NR_ia32_setreuid 70 #define __NR_ia32_setregid 71 #define __NR_ia32_sigsuspend 72 #define __NR_ia32_sigpending 73 #define __NR_ia32_sethostname 74 #define __NR_ia32_setrlimit 75 #define __NR_ia32_getrlimit 76 /* Back compatible 2Gig limited rlimit */ #define __NR_ia32_getrusage 77 #define __NR_ia32_gettimeofday 78 #define __NR_ia32_settimeofday 79 #define __NR_ia32_getgroups 80 #define __NR_ia32_setgroups 81 #define __NR_ia32_select 82 #define __NR_ia32_symlink 83 #define __NR_ia32_oldlstat 84 #define __NR_ia32_readlink 85 #define __NR_ia32_uselib 86 #define __NR_ia32_swapon 87 #define __NR_ia32_reboot 88 #define __NR_ia32_readdir 89 #define __NR_ia32_mmap 90 #define __NR_ia32_munmap 91 #define __NR_ia32_truncate 92 #define __NR_ia32_ftruncate 93 #define __NR_ia32_fchmod 94 #define __NR_ia32_fchown 95 #define __NR_ia32_getpriority 96 #define __NR_ia32_setpriority 97 #define __NR_ia32_profil 98 #define __NR_ia32_statfs 99 #define __NR_ia32_fstatfs 100 #define __NR_ia32_ioperm 101 #define __NR_ia32_socketcall 102 #define __NR_ia32_syslog 103 #define __NR_ia32_setitimer 104 #define __NR_ia32_getitimer 105 #define __NR_ia32_stat 106 #define __NR_ia32_lstat 107 #define __NR_ia32_fstat 108 #define __NR_ia32_olduname 109 #define __NR_ia32_iopl 110 #define __NR_ia32_vhangup 111 #define __NR_ia32_idle 112 #define __NR_ia32_vm86old 113 #define __NR_ia32_wait4 114 #define __NR_ia32_swapoff 115 #define __NR_ia32_sysinfo 116 #define __NR_ia32_ipc 117 #define __NR_ia32_fsync 118 #define __NR_ia32_sigreturn 119 #define __NR_ia32_clone 120 #define __NR_ia32_setdomainname 121 #define __NR_ia32_uname 122 #define 
__NR_ia32_modify_ldt 123 #define __NR_ia32_adjtimex 124 #define __NR_ia32_mprotect 125 #define __NR_ia32_sigprocmask 126 #define __NR_ia32_create_module 127 #define __NR_ia32_init_module 128 #define __NR_ia32_delete_module 129 #define __NR_ia32_get_kernel_syms 130 #define __NR_ia32_quotactl 131 #define __NR_ia32_getpgid 132 #define __NR_ia32_fchdir 133 #define __NR_ia32_bdflush 134 #define __NR_ia32_sysfs 135 #define __NR_ia32_personality 136 #define __NR_ia32_afs_syscall 137 /* Syscall for Andrew File System */ #define __NR_ia32_setfsuid 138 #define __NR_ia32_setfsgid 139 #define __NR_ia32__llseek 140 #define __NR_ia32_getdents 141 #define __NR_ia32__newselect 142 #define __NR_ia32_flock 143 #define __NR_ia32_msync 144 #define __NR_ia32_readv 145 #define __NR_ia32_writev 146 #define __NR_ia32_getsid 147 #define __NR_ia32_fdatasync 148 #define __NR_ia32__sysctl 149 #define __NR_ia32_mlock 150 #define __NR_ia32_munlock 151 #define __NR_ia32_mlockall 152 #define __NR_ia32_munlockall 153 #define __NR_ia32_sched_setparam 154 #define __NR_ia32_sched_getparam 155 #define __NR_ia32_sched_setscheduler 156 #define __NR_ia32_sched_getscheduler 157 #define __NR_ia32_sched_yield 158 #define __NR_ia32_sched_get_priority_max 159 #define __NR_ia32_sched_get_priority_min 160 #define __NR_ia32_sched_rr_get_interval 161 #define __NR_ia32_nanosleep 162 #define __NR_ia32_mremap 163 #define __NR_ia32_setresuid 164 #define __NR_ia32_getresuid 165 #define __NR_ia32_vm86 166 #define __NR_ia32_query_module 167 #define __NR_ia32_poll 168 #define __NR_ia32_nfsservctl 169 #define __NR_ia32_setresgid 170 #define __NR_ia32_getresgid 171 #define __NR_ia32_prctl 172 #define __NR_ia32_rt_sigreturn 173 #define __NR_ia32_rt_sigaction 174 #define __NR_ia32_rt_sigprocmask 175 #define __NR_ia32_rt_sigpending 176 #define __NR_ia32_rt_sigtimedwait 177 #define __NR_ia32_rt_sigqueueinfo 178 #define __NR_ia32_rt_sigsuspend 179 #define __NR_ia32_pread 180 #define __NR_ia32_pwrite 181 #define __NR_ia32_chown 182 #define __NR_ia32_getcwd 183 #define __NR_ia32_capget 184 #define __NR_ia32_capset 185 #define __NR_ia32_sigaltstack 186 #define __NR_ia32_sendfile 187 #define __NR_ia32_getpmsg 188 /* some people actually want streams */ #define __NR_ia32_putpmsg 189 /* some people actually want streams */ #define __NR_ia32_vfork 190 #define __NR_ia32_ugetrlimit 191 /* SuS compliant getrlimit */ #define __NR_ia32_mmap2 192 #define __NR_ia32_truncate64 193 #define __NR_ia32_ftruncate64 194 #define __NR_ia32_stat64 195 #define __NR_ia32_lstat64 196 #define __NR_ia32_fstat64 197 #define __NR_ia32_lchown32 198 #define __NR_ia32_getuid32 199 #define __NR_ia32_getgid32 200 #define __NR_ia32_geteuid32 201 #define __NR_ia32_getegid32 202 #define __NR_ia32_setreuid32 203 #define __NR_ia32_setregid32 204 #define __NR_ia32_getgroups32 205 #define __NR_ia32_setgroups32 206 #define __NR_ia32_fchown32 207 #define __NR_ia32_setresuid32 208 #define __NR_ia32_getresuid32 209 #define __NR_ia32_setresgid32 210 #define __NR_ia32_getresgid32 211 #define __NR_ia32_chown32 212 #define __NR_ia32_setuid32 213 #define __NR_ia32_setgid32 214 #define __NR_ia32_setfsuid32 215 #define __NR_ia32_setfsgid32 216 #define __NR_ia32_pivot_root 217 #define __NR_ia32_mincore 218 #define __NR_ia32_madvise 219 #define __NR_ia32_madvise1 219 /* delete when C lib stub is removed */ #define __NR_ia32_getdents64 220 #define __NR_ia32_fcntl64 221 #define __NR_ia32_tuxcall 222 #define __NR_ia32_security 223 #define __NR_ia32_gettid 224 #define __NR_ia32_readahead 225 #define 
__NR_ia32_setxattr 226 #define __NR_ia32_lsetxattr 227 #define __NR_ia32_fsetxattr 228 #define __NR_ia32_getxattr 229 #define __NR_ia32_lgetxattr 230 #define __NR_ia32_fgetxattr 231 #define __NR_ia32_listxattr 232 #define __NR_ia32_llistxattr 233 #define __NR_ia32_flistxattr 234 #define __NR_ia32_removexattr 235 #define __NR_ia32_lremovexattr 236 #define __NR_ia32_fremovexattr 237 #define __NR_ia32_tkill 238 #define __NR_ia32_sendfile64 239 #define __NR_ia32_futex 240 #define __NR_ia32_sched_setaffinity 241 #define __NR_ia32_sched_getaffinity 242 #define __NR_ia32_set_thread_area 243 #define __NR_ia32_get_thread_area 244 #define __NR_ia32_io_setup 245 #define __NR_ia32_io_destroy 246 #define __NR_ia32_io_getevents 247 #define __NR_ia32_io_submit 248 #define __NR_ia32_io_cancel 249 #define __NR_ia32_exit_group 252 #define __NR_ia32_lookup_dcookie 253 #define __NR_ia32_sys_epoll_create 254 #define __NR_ia32_sys_epoll_ctl 255 #define __NR_ia32_sys_epoll_wait 256 #define __NR_ia32_remap_file_pages 257 #define __NR_ia32_set_tid_address 258 #define __NR_ia32_timer_create 259 #define __NR_ia32_timer_settime (__NR_ia32_timer_create+1) #define __NR_ia32_timer_gettime (__NR_ia32_timer_create+2) #define __NR_ia32_timer_getoverrun (__NR_ia32_timer_create+3) #define __NR_ia32_timer_delete (__NR_ia32_timer_create+4) #define __NR_ia32_clock_settime (__NR_ia32_timer_create+5) #define __NR_ia32_clock_gettime (__NR_ia32_timer_create+6) #define __NR_ia32_clock_getres (__NR_ia32_timer_create+7) #define __NR_ia32_clock_nanosleep (__NR_ia32_timer_create+8) #define __NR_ia32_statfs64 268 #define __NR_ia32_fstatfs64 269 #define __NR_ia32_tgkill 270 #define __NR_ia32_utimes 271 #define __NR_ia32_fadvise64_64 272 #define __NR_ia32_vserver 273 #define __NR_ia32_mbind 274 #define __NR_ia32_get_mempolicy 275 #define __NR_ia32_set_mempolicy 276 #define __NR_ia32_mq_open 277 #define __NR_ia32_mq_unlink (__NR_ia32_mq_open+1) #define __NR_ia32_mq_timedsend (__NR_ia32_mq_open+2) #define __NR_ia32_mq_timedreceive (__NR_ia32_mq_open+3) #define __NR_ia32_mq_notify (__NR_ia32_mq_open+4) #define __NR_ia32_mq_getsetattr (__NR_ia32_mq_open+5) #define __NR_ia32_kexec 283 #define __NR_ia32_waitid 284 /* #define __NR_sys_setaltroot 285 */ #define __NR_ia32_add_key 286 #define __NR_ia32_request_key 287 #define __NR_ia32_keyctl 288 #define __NR_ia32_ioprio_set 289 #define __NR_ia32_ioprio_get 290 #define __NR_ia32_inotify_init 291 #define __NR_ia32_inotify_add_watch 292 #define __NR_ia32_inotify_rm_watch 293 #define __NR_ia32_migrate_pages 294 #define __NR_ia32_openat 295 #define __NR_ia32_mkdirat 296 #define __NR_ia32_mknodat 297 #define __NR_ia32_fchownat 298 #define __NR_ia32_futimesat 299 #define __NR_ia32_fstatat64 300 #define __NR_ia32_unlinkat 301 #define __NR_ia32_renameat 302 #define __NR_ia32_linkat 303 #define __NR_ia32_symlinkat 304 #define __NR_ia32_readlinkat 305 #define __NR_ia32_fchmodat 306 #define __NR_ia32_faccessat 307 #define __NR_ia32_pselect6 308 #define __NR_ia32_ppoll 309 #define __NR_ia32_unshare 310 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ide.h000066400000000000000000000000321314037446600240320ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/idle.h000066400000000000000000000004371314037446600242170ustar00rootroot00000000000000#ifndef _ASM_X86_64_IDLE_H #define _ASM_X86_64_IDLE_H 1 #define IDLE_START 1 #define IDLE_END 2 struct notifier_block; void idle_notifier_register(struct notifier_block 
*n); void idle_notifier_unregister(struct notifier_block *n); void enter_idle(void); void exit_idle(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/io.h000066400000000000000000000226261314037446600237150ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). * * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. */ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define __SLOW_DOWN_IO "\noutb %%al,$0x80" #ifdef REALLY_SLOW_IO #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO #else #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO #endif /* * Talk about misusing macros.. */ #define __OUT1(s,x) \ static inline void out##s(unsigned x value, unsigned short port) { #define __OUT2(s,s1,s2) \ __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" #define __OUT(s,s1,x) \ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ #define __IN1(s) \ static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; #define __IN2(s,s1,s2) \ __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" #define __IN(s,s1,i...) \ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ #define __INS(s) \ static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ { __asm__ __volatile__ ("rep ; ins" #s \ : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } #define __OUTS(s) \ static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ { __asm__ __volatile__ ("rep ; outs" #s \ : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } #define RETURN_TYPE unsigned char __IN(b,"") #undef RETURN_TYPE #define RETURN_TYPE unsigned short __IN(w,"") #undef RETURN_TYPE #define RETURN_TYPE unsigned int __IN(l,"") #undef RETURN_TYPE __OUT(b,"b",char) __OUT(w,"w",short) __OUT(l,,int) __INS(b) __INS(w) __INS(l) __OUTS(b) __OUTS(w) __OUTS(l) #define IO_SPACE_LIMIT 0xffff #if defined(__KERNEL__) && __x86_64__ #include #ifndef __i386__ /* * Change virtual addresses to physical addresses and vv. * These are pretty trivial */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } static inline void * phys_to_virt(unsigned long address) { return __va(address); } #endif /* * Change "struct page" to physical address. 
*/ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) #include extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); static inline void __iomem * ioremap (unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } /* * This one maps high address device memory and turns off caching for that area. * it's useful if some control registers are in such an area and write combining * or read caching is not desirable: */ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); extern void *early_ioremap(unsigned long addr, unsigned long size); extern void early_iounmap(void *addr, unsigned long size); /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus virt_to_phys #define isa_page_to_bus page_to_phys #define isa_bus_to_virt phys_to_virt /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. */ static inline __u8 __readb(const volatile void __iomem *addr) { return *(__force volatile __u8 *)addr; } static inline __u16 __readw(const volatile void __iomem *addr) { return *(__force volatile __u16 *)addr; } static inline __u32 __readl(const volatile void __iomem *addr) { return *(__force volatile __u32 *)addr; } static inline __u64 __readq(const volatile void __iomem *addr) { return *(__force volatile __u64 *)addr; } #define readb(x) __readb(x) #define readw(x) __readw(x) #define readl(x) __readl(x) #define readq(x) __readq(x) #define readb_relaxed(a) readb(a) #define readw_relaxed(a) readw(a) #define readl_relaxed(a) readl(a) #define readq_relaxed(a) readq(a) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl #define __raw_readq readq #define mmiowb() #ifdef CONFIG_UNORDERED_IO static inline void __writel(__u32 val, volatile void __iomem *addr) { volatile __u32 __iomem *target = addr; asm volatile("movnti %1,%0" : "=m" (*target) : "r" (val) : "memory"); } static inline void __writeq(__u64 val, volatile void __iomem *addr) { volatile __u64 __iomem *target = addr; asm volatile("movnti %1,%0" : "=m" (*target) : "r" (val) : "memory"); } #else static inline void __writel(__u32 b, volatile void __iomem *addr) { *(__force volatile __u32 *)addr = b; } static inline void __writeq(__u64 b, volatile void __iomem *addr) { *(__force volatile __u64 *)addr = b; } #endif static inline void __writeb(__u8 b, volatile void __iomem *addr) { *(__force volatile __u8 *)addr = b; } static inline void __writew(__u16 b, volatile void __iomem *addr) { *(__force volatile __u16 *)addr = b; } #define writeq(val,addr) __writeq((val),(addr)) #define writel(val,addr) __writel((val),(addr)) #define writew(val,addr) __writew((val),(addr)) #define writeb(val,addr) __writeb((val),(addr)) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define __raw_writeq writeq void __memcpy_fromio(void*,unsigned long,unsigned); void __memcpy_toio(unsigned long,const void*,unsigned); static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) { 
__memcpy_fromio(to,(unsigned long)from,len); } static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) { __memcpy_toio((unsigned long)to,from,len); } void memset_io(volatile void __iomem *a, int b, size_t c); /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) #define isa_readb(a) readb(__ISA_IO_base + (a)) #define isa_readw(a) readw(__ISA_IO_base + (a)) #define isa_readl(a) readl(__ISA_IO_base + (a)) #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) /* * Again, x86-64 does not require mem IO specific function. */ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d)) #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(void __iomem *io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() extern int iommu_bio_merge; #define BIO_VMERGE_BOUNDARY iommu_bio_merge /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/io_apic.h000066400000000000000000000145071314037446600247100ustar00rootroot00000000000000#ifndef __ASM_IO_APIC_H #define __ASM_IO_APIC_H #include #include #include /* * Intel IO-APIC support for SMP and UP systems. 
* * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar */ #ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_PCI_MSI static inline int use_pci_vector(void) {return 1;} static inline void disable_edge_ioapic_vector(unsigned int vector) { } static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { } static inline void end_edge_ioapic_vector (unsigned int vector) { } #define startup_level_ioapic startup_level_ioapic_vector #define shutdown_level_ioapic mask_IO_APIC_vector #define enable_level_ioapic unmask_IO_APIC_vector #define disable_level_ioapic mask_IO_APIC_vector #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector #define end_level_ioapic end_level_ioapic_vector #define set_ioapic_affinity set_ioapic_affinity_vector #define startup_edge_ioapic startup_edge_ioapic_vector #define shutdown_edge_ioapic disable_edge_ioapic_vector #define enable_edge_ioapic unmask_IO_APIC_vector #define disable_edge_ioapic disable_edge_ioapic_vector #define ack_edge_ioapic ack_edge_ioapic_vector #define end_edge_ioapic end_edge_ioapic_vector #else static inline int use_pci_vector(void) {return 0;} static inline void disable_edge_ioapic_irq(unsigned int irq) { } static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { } static inline void end_edge_ioapic_irq (unsigned int irq) { } #define startup_level_ioapic startup_level_ioapic_irq #define shutdown_level_ioapic mask_IO_APIC_irq #define enable_level_ioapic unmask_IO_APIC_irq #define disable_level_ioapic mask_IO_APIC_irq #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq #define end_level_ioapic end_level_ioapic_irq #define set_ioapic_affinity set_ioapic_affinity_irq #define startup_edge_ioapic startup_edge_ioapic_irq #define shutdown_edge_ioapic disable_edge_ioapic_irq #define enable_edge_ioapic unmask_IO_APIC_irq #define disable_edge_ioapic disable_edge_ioapic_irq #define ack_edge_ioapic ack_edge_ioapic_irq #define end_edge_ioapic end_edge_ioapic_irq #endif #define APIC_MISMATCH_DEBUG #ifndef CONFIG_XEN #define IO_APIC_BASE(idx) \ ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) #else #include #include #endif /* * The structure of the IO-APIC: */ union IO_APIC_reg_00 { u32 raw; struct { u32 __reserved_2 : 14, LTS : 1, delivery_type : 1, __reserved_1 : 8, ID : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_01 { u32 raw; struct { u32 version : 8, __reserved_2 : 7, PRQ : 1, entries : 8, __reserved_1 : 8; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_02 { u32 raw; struct { u32 __reserved_2 : 24, arbitration : 4, __reserved_1 : 4; } __attribute__ ((packed)) bits; }; union IO_APIC_reg_03 { u32 raw; struct { u32 boot_DT : 1, __reserved_1 : 31; } __attribute__ ((packed)) bits; }; /* * # of IO-APICs and # of IRQ routing registers */ extern int nr_ioapics; extern int nr_ioapic_registers[MAX_IO_APICS]; enum ioapic_irq_destination_types { dest_Fixed = 0, dest_LowestPrio = 1, dest_SMI = 2, dest__reserved_1 = 3, dest_NMI = 4, dest_INIT = 5, dest__reserved_2 = 6, dest_ExtINT = 7 }; struct IO_APIC_route_entry { __u32 vector : 8, delivery_mode : 3, /* 000: FIXED * 001: lowest prio * 111: ExtINT */ dest_mode : 1, /* 0: physical, 1: logical */ delivery_status : 1, polarity : 1, irr : 1, trigger : 1, /* 0: edge, 1: level */ mask : 1, /* 0: enabled, 1: disabled */ __reserved_2 : 15; __u32 __reserved_3 : 24, dest : 8; } __attribute__ ((packed)); /* * MP-BIOS irq configuration table structures: */ /* I/O APIC entries */ extern struct mpc_config_ioapic 
mp_ioapics[MAX_IO_APICS]; /* # of MP IRQ source entries */ extern int mp_irq_entries; /* MP IRQ source entries */ extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; /* non-0 if default (table-less) MP configuration */ extern int mpc_default_type; static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) { #ifndef CONFIG_XEN *IO_APIC_BASE(apic) = reg; return *(IO_APIC_BASE(apic)+4); #else struct physdev_apic apic_op; int ret; apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; apic_op.reg = reg; ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); if (ret) return ret; return apic_op.value; #endif } static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) { #ifndef CONFIG_XEN *IO_APIC_BASE(apic) = reg; *(IO_APIC_BASE(apic)+4) = value; #else struct physdev_apic apic_op; apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr; apic_op.reg = reg; apic_op.value = value; WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op)); #endif } #ifndef CONFIG_XEN /* * Re-write a value: to be used for read-modify-write * cycles where the read already set up the index register. */ static inline void io_apic_modify(unsigned int apic, unsigned int value) { *(IO_APIC_BASE(apic)+4) = value; } /* * Synchronize the IO-APIC and the CPU by doing * a dummy read from the IO-APIC */ static inline void io_apic_sync(unsigned int apic) { (void) *(IO_APIC_BASE(apic)+4); } #else #define io_apic_modify io_apic_write #endif /* 1 if "noapic" boot option passed */ extern int skip_ioapic_setup; /* * If we use the IO-APIC for IRQ routing, disable automatic * assignment of PCI IRQ's. */ #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) #ifdef CONFIG_ACPI extern int io_apic_get_version (int ioapic); extern int io_apic_get_redir_entries (int ioapic); extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int); #endif extern int sis_apic_bug; /* dummy */ #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 #endif extern int assign_irq_vector(int irq); void enable_NMI_through_LVT0 (void * dummy); extern spinlock_t i8259A_lock; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ioctl.h000066400000000000000000000000371314037446600244100ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ioctls.h000066400000000000000000000050711314037446600245760ustar00rootroot00000000000000#ifndef __ARCH_X8664_IOCTLS_H__ #define __ARCH_X8664_IOCTLS_H__ #include /* 0x54 is just a magic number to make these relatively unique ('T') */ #define TCGETS 0x5401 #define TCSETS 0x5402 #define TCSETSW 0x5403 #define TCSETSF 0x5404 #define TCGETA 0x5405 #define TCSETA 0x5406 #define TCSETAW 0x5407 #define TCSETAF 0x5408 #define TCSBRK 0x5409 #define TCXONC 0x540A #define TCFLSH 0x540B #define TIOCEXCL 0x540C #define TIOCNXCL 0x540D #define TIOCSCTTY 0x540E #define TIOCGPGRP 0x540F #define TIOCSPGRP 0x5410 #define TIOCOUTQ 0x5411 #define TIOCSTI 0x5412 #define TIOCGWINSZ 0x5413 #define TIOCSWINSZ 0x5414 #define TIOCMGET 0x5415 #define TIOCMBIS 0x5416 #define TIOCMBIC 0x5417 #define TIOCMSET 0x5418 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A #define FIONREAD 0x541B #define TIOCINQ FIONREAD #define TIOCLINUX 0x541C #define TIOCCONS 0x541D #define TIOCGSERIAL 0x541E #define TIOCSSERIAL 0x541F #define TIOCPKT 0x5420 #define FIONBIO 0x5421 #define TIOCNOTTY 0x5422 #define TIOCSETD 0x5423 #define TIOCGETD 0x5424 #define TCSBRKP 0x5425 /* Needed for POSIX 
tcsendbreak() */ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get real dev no below /dev/console */ #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ #define FIOCLEX 0x5451 #define FIOASYNC 0x5452 #define TIOCSERCONFIG 0x5453 #define TIOCSERGWILD 0x5454 #define TIOCSERSWILD 0x5455 #define TIOCGLCKTRMIOS 0x5456 #define TIOCSLCKTRMIOS 0x5457 #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ #define TIOCSERGETLSR 0x5459 /* Get line status register */ #define TIOCSERGETMULTI 0x545A /* Get multiport config */ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ #define FIOQSIZE 0x5460 /* Used for packet mode */ #define TIOCPKT_DATA 0 #define TIOCPKT_FLUSHREAD 1 #define TIOCPKT_FLUSHWRITE 2 #define TIOCPKT_STOP 4 #define TIOCPKT_START 8 #define TIOCPKT_NOSTOP 16 #define TIOCPKT_DOSTOP 32 #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ipcbuf.h000066400000000000000000000011741314037446600245510ustar00rootroot00000000000000#ifndef __x86_64_IPCBUF_H__ #define __x86_64_IPCBUF_H__ /* * The ipc64_perm structure for x86_64 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 32-bit mode_t and seq * - 2 miscellaneous 32-bit values */ struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; __kernel_uid32_t cuid; __kernel_gid32_t cgid; __kernel_mode_t mode; unsigned short __pad1; unsigned short seq; unsigned short __pad2; unsigned long __unused1; unsigned long __unused2; }; #endif /* __x86_64_IPCBUF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ipi.h000066400000000000000000000055021314037446600240610ustar00rootroot00000000000000#ifndef __ASM_IPI_H #define __ASM_IPI_H /* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 * * Generic APIC InterProcessor Interrupt code. * * Moved to include file by James Cleverdon from * arch/x86-64/kernel/smp.c * * Copyrights from kernel/smp.c: * * (c) 1995 Alan Cox, Building #3 * (c) 1998-99, 2000 Ingo Molnar * (c) 2002,2003 Andi Kleen, SuSE Labs. * Subject to the GNU Public License, v.2 */ #include #include #include #include #ifndef CONFIG_XEN /* * the following functions deal with sending IPIs between CPUs. * * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. */ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) { unsigned int icr = shortcut | dest; switch (vector) { default: icr |= APIC_DM_FIXED | vector; break; case NMI_VECTOR: icr |= APIC_DM_NMI; break; } return icr; } static inline int __prepare_ICR2 (unsigned int mask) { return SET_APIC_DEST_FIELD(mask); } static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) { /* * Subtle. In the case of the 'never do double writes' workaround * we have to lock out interrupts to be safe. 
As we don't care * of the value read we use an atomic rmw access to avoid costly * cli/sti. Otherwise we use an even cheaper single atomic write * to the APIC. */ unsigned int cfg; /* * Wait for idle. */ apic_wait_icr_idle(); /* * No need to touch the target chip field */ cfg = __prepare_ICR(shortcut, vector, dest); #ifdef DUMP_VECTOR if (vector == DUMP_VECTOR) { /* * Setup DUMP IPI to be delivered as an NMI */ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI; } #endif /* * Send the IPI. The write to APIC_ICR fires this off. */ apic_write(APIC_ICR, cfg); } static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) { unsigned long cfg, flags; unsigned long query_cpu; /* * Hack. The clustered APIC addressing mode doesn't allow us to send * to an arbitrary mask, so I do a unicast to each CPU instead. * - mbligh */ local_irq_save(flags); for_each_cpu_mask(query_cpu, mask) { /* * Wait for idle. */ apic_wait_icr_idle(); /* * prepare target chip field */ cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]); apic_write(APIC_ICR2, cfg); /* * program the ICR */ cfg = __prepare_ICR(0, vector, APIC_DEST_PHYSICAL); #ifdef DUMP_VECTOR if (vector == DUMP_VECTOR) { /* * Setup DUMP IPI to be delivered as an NMI */ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI; } #endif /* * Send the IPI. The write to APIC_ICR fires this off. */ apic_write(APIC_ICR, cfg); } local_irq_restore(flags); } #endif /* CONFIG_XEN */ #endif /* __ASM_IPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/irq.h000066400000000000000000000025431314037446600240750ustar00rootroot00000000000000#ifndef _ASM_IRQ_H #define _ASM_IRQ_H /* * linux/include/asm/irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * IRQ/IPI changes taken from work by Thomas Radke * */ #define TIMER_IRQ 0 /* * 16 8259A IRQ's, 208 potential APIC interrupt sources. * Right now the APIC is mostly only used for SMP. * 256 vectors is an architectural limit. (we can have * more than 256 devices theoretically, but they will * have to use shared interrupts) * Since vectors 0x00-0x1f are used/reserved for the CPU, * the usable vector space is 0x20-0xff (224 vectors) */ /* * The maximum number of vectors supported by x86_64 processors * is limited to 256. For processors other than x86_64, NR_VECTORS * should be changed accordingly. */ #define NR_VECTORS 256 #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ #ifdef CONFIG_PCI_MSI #define NR_IRQS FIRST_SYSTEM_VECTOR #define NR_IRQ_VECTORS NR_IRQS #else #define NR_IRQS 224 #define NR_IRQ_VECTORS (32 * NR_CPUS) #endif static __inline__ int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); } #ifdef CONFIG_X86_LOCAL_APIC #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ #endif #ifdef CONFIG_HOTPLUG_CPU #include extern void fixup_irqs(cpumask_t map); #endif #define __ARCH_HAS_DO_SOFTIRQ 1 #endif /* _ASM_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kdb.h000066400000000000000000000065501314037446600240440ustar00rootroot00000000000000#ifndef _ASM_KDB_H #define _ASM_KDB_H #include /* * Kernel Debugger Architecture Dependent Global Headers * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. */ /* * KDB_ENTER() is a macro which causes entry into the kernel * debugger from any point in the kernel code stream. 
If it * is intended to be used from interrupt level, it must use * a non-maskable entry method. The vector is KDB_VECTOR, * defined in hw_irq.h */ #define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("int %0" :: "i" (KDBENTER_VECTOR)); }} while(0) /* * Needed for exported symbols. */ typedef unsigned long kdb_machreg_t; #define kdb_machreg_fmt "0x%lx" #define kdb_machreg_fmt0 "0x%016lx" #define kdb_bfd_vma_fmt "0x%lx" #define kdb_bfd_vma_fmt0 "0x%016lx" #define kdb_elfw_addr_fmt "0x%x" #define kdb_elfw_addr_fmt0 "0x%016x" /* * Per cpu arch specific kdb state. Must be in range 0xff000000. */ #define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */ /* * Functions to safely read and write kernel areas. The {to,from}_xxx * addresses are not necessarily valid, these functions must check for * validity. If the arch already supports get and put routines with * suitable validation and/or recovery on invalid addresses then use * those routines, otherwise check it yourself. */ /* * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to * trap invalid addresses in the _xxx fields. Verify the other address * of the pair is valid by accessing the first and last byte ourselves, * then any access violations should only be caused by the _xxx * addresses, */ #include static inline int __kdba_putarea_size(unsigned long to_xxx, void *from, size_t size) { mm_segment_t oldfs = get_fs(); int r; char c; c = *((volatile char *)from); c = *((volatile char *)from + size - 1); if (to_xxx < PAGE_OFFSET) { return kdb_putuserarea_size(to_xxx, from, size); } set_fs(KERNEL_DS); r = __copy_to_user((void *)to_xxx, from, size); set_fs(oldfs); return r; } static inline int __kdba_getarea_size(void *to, unsigned long from_xxx, size_t size) { mm_segment_t oldfs = get_fs(); int r; *((volatile char *)to) = '\0'; *((volatile char *)to + size - 1) = '\0'; if (from_xxx < PAGE_OFFSET) { return kdb_getuserarea_size(to, from_xxx, size); } set_fs(KERNEL_DS); r = __copy_to_user(to, (void *)from_xxx, size); set_fs(oldfs); return r; } /* For numa with replicated code/data, the platform must supply its own * kdba_putarea_size and kdba_getarea_size routines. Without replication kdb * uses the standard architecture routines. */ #ifdef CONFIG_NUMA_REPLICATE extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size); extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size); #else #define kdba_putarea_size __kdba_putarea_size #define kdba_getarea_size __kdba_getarea_size #endif static inline int kdba_verify_rw(unsigned long addr, size_t size) { unsigned char data[size]; return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size)); } static inline unsigned long kdba_funcptr_value(void *fp) { return (unsigned long)fp; } #endif /* !_ASM_KDB_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kdbprivate.h000066400000000000000000000135001314037446600254300ustar00rootroot00000000000000#ifndef _ASM_KDBPRIVATE_H #define _ASM_KDBPRIVATE_H /* * Kernel Debugger Architecture Dependent Private Headers * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. */ typedef unsigned char kdb_machinst_t; /* * KDB_MAXBPT describes the total number of breakpoints * supported by this architecure. 
*/ #define KDB_MAXBPT 16 /* * KDB_MAXHARDBPT describes the total number of hardware * breakpoint registers that exist. */ #define KDB_MAXHARDBPT 4 /* Maximum number of arguments to a function */ #define KDBA_MAXARGS 16 /* * Platform specific environment entries */ #define KDB_PLATFORM_ENV "IDMODE=x86_64", "BYTESPERWORD=8", "IDCOUNT=16" /* * Define the direction that the stack grows */ #define KDB_STACK_DIRECTION (-1) /* Stack grows down */ /* * Support for ia32 debug registers */ typedef struct _kdbhard_bp { kdb_machreg_t bph_reg; /* Register this breakpoint uses */ unsigned int bph_free:1; /* Register available for use */ unsigned int bph_data:1; /* Data Access breakpoint */ unsigned int bph_write:1; /* Write Data breakpoint */ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */ } kdbhard_bp_t; extern kdbhard_bp_t kdb_hardbreaks[/* KDB_MAXHARDBPT */]; #define IA32_BREAKPOINT_INSTRUCTION 0xcc #define DR6_BT 0x00008000 #define DR6_BS 0x00004000 #define DR6_BD 0x00002000 #define DR6_B3 0x00000008 #define DR6_B2 0x00000004 #define DR6_B1 0x00000002 #define DR6_B0 0x00000001 #define DR6_DR_MASK 0x0000000F #define DR7_RW_VAL(dr, drnum) \ (((dr) >> (16 + (4 * (drnum)))) & 0x3) #define DR7_RW_SET(dr, drnum, rw) \ do { \ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \ } while (0) #define DR7_RW0(dr) DR7_RW_VAL(dr, 0) #define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw) #define DR7_RW1(dr) DR7_RW_VAL(dr, 1) #define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw) #define DR7_RW2(dr) DR7_RW_VAL(dr, 2) #define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw) #define DR7_RW3(dr) DR7_RW_VAL(dr, 3) #define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw) #define DR7_LEN_VAL(dr, drnum) \ (((dr) >> (18 + (4 * (drnum)))) & 0x3) #define DR7_LEN_SET(dr, drnum, rw) \ do { \ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \ } while (0) #define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0) #define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len) #define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1) #define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len) #define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2) #define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len) #define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3) #define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len) #define DR7_G0(dr) (((dr)>>1)&0x1) #define DR7_G0SET(dr) ((dr) |= 0x2) #define DR7_G0CLR(dr) ((dr) &= ~0x2) #define DR7_G1(dr) (((dr)>>3)&0x1) #define DR7_G1SET(dr) ((dr) |= 0x8) #define DR7_G1CLR(dr) ((dr) &= ~0x8) #define DR7_G2(dr) (((dr)>>5)&0x1) #define DR7_G2SET(dr) ((dr) |= 0x20) #define DR7_G2CLR(dr) ((dr) &= ~0x20) #define DR7_G3(dr) (((dr)>>7)&0x1) #define DR7_G3SET(dr) ((dr) |= 0x80) #define DR7_G3CLR(dr) ((dr) &= ~0x80) #define DR7_L0(dr) (((dr))&0x1) #define DR7_L0SET(dr) ((dr) |= 0x1) #define DR7_L0CLR(dr) ((dr) &= ~0x1) #define DR7_L1(dr) (((dr)>>2)&0x1) #define DR7_L1SET(dr) ((dr) |= 0x4) #define DR7_L1CLR(dr) ((dr) &= ~0x4) #define DR7_L2(dr) (((dr)>>4)&0x1) #define DR7_L2SET(dr) ((dr) |= 0x10) #define DR7_L2CLR(dr) ((dr) &= ~0x10) #define DR7_L3(dr) (((dr)>>6)&0x1) #define DR7_L3SET(dr) ((dr) |= 0x40) #define DR7_L3CLR(dr) ((dr) &= ~0x40) #define DR7_GD 0x00002000 /* General Detect Enable */ #define DR7_GE 0x00000200 /* Global exact */ #define DR7_LE 0x00000100 /* Local exact */ extern kdb_machreg_t kdba_getdr6(void); extern void kdba_putdr6(kdb_machreg_t); extern kdb_machreg_t kdba_getdr7(void); extern kdb_machreg_t kdba_getdr(int); extern void kdba_putdr(int, kdb_machreg_t); 
extern kdb_machreg_t kdb_getcr(int); /* * reg indicies for x86_64 setjmp/longjmp */ #define JB_RBX 0 #define JB_RBP 1 #define JB_R12 2 #define JB_R13 3 #define JB_R14 4 #define JB_R15 5 #define JB_RSP 6 #define JB_PC 7 typedef struct __kdb_jmp_buf { unsigned long regs[8]; /* kdba_setjmp assumes fixed offsets here */ } kdb_jmp_buf; extern int asmlinkage kdba_setjmp(kdb_jmp_buf *); extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int); #define kdba_setjmp kdba_setjmp extern kdb_jmp_buf *kdbjmpbuf; /* Arch specific data saved for running processes */ struct kdba_running_process { long rsp; /* KDB may be on a different stack */ long rip; /* rip when rsp was set */ }; register unsigned long current_stack_pointer asm("rsp") __attribute_used__; static inline void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs) { k->rsp = current_stack_pointer; __asm__ __volatile__ ( " lea 0(%%rip),%%rax; movq %%rax,%0 ; " : "=r"(k->rip) : : "rax" ); } static inline void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs) { } struct kdb_activation_record; extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu, struct kdb_activation_record *ar); extern void kdba_wait_for_cpus(void); extern asmlinkage void kdb_interrupt(void); #define KDB_INT_REGISTERS 16 #endif /* !_ASM_KDBPRIVATE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kdebug.h000066400000000000000000000024631314037446600245440ustar00rootroot00000000000000#ifndef _X86_64_KDEBUG_H #define _X86_64_KDEBUG_H 1 #include struct pt_regs; struct die_args { struct pt_regs *regs; const char *str; long err; int trapnr; int signr; }; /* Note - you should never unregister because that can race with NMIs. If you really want to do it first unregister - then synchronize_sched - then free. */ int register_die_notifier(struct notifier_block *nb); extern struct notifier_block *die_chain; /* Grossly misnamed. */ enum die_val { DIE_OOPS = 1, DIE_INT3, DIE_DEBUG, DIE_PANIC, DIE_NMI, DIE_DIE, DIE_NMIWATCHDOG, DIE_KERNELDEBUG, DIE_TRAP, DIE_GPF, DIE_CALL, DIE_NMI_IPI, DIE_PAGE_FAULT, DIE_KDEBUG_ENTER, DIE_KDEBUG_LEAVE, }; static inline int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig }; return notifier_call_chain(&die_chain, val, &args); } extern int printk_address(unsigned long address); extern void die(const char *,struct pt_regs *,long); extern void __die(const char *,struct pt_regs *,long); extern void show_registers(struct pt_regs *regs); extern void dump_pagetable(unsigned long); extern unsigned long oops_begin(void); extern void oops_end(unsigned long); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kerntypes.h000066400000000000000000000011661314037446600253260ustar00rootroot00000000000000/* * asm-x86_64/kerntypes.h * * Arch-dependent header file that includes headers for all arch-specific * types of interest. * The kernel type information is used by the lcrash utility when * analyzing system crash dumps or the live system. Using the type * information for the running system, rather than kernel header files, * makes for a more flexible and robust analysis tool. * * This source code is released under the GNU GPL. 
*/ /* x86_64-specific header files */ #ifndef _X86_64_KERNTYPES_H #define _X86_64_KERNTYPES_H /* Use the default */ #include #endif /* _X86_64_KERNTYPES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kexec.h000066400000000000000000000072561314037446600244070ustar00rootroot00000000000000#ifndef _X86_64_KEXEC_H #define _X86_64_KEXEC_H #define PA_CONTROL_PAGE 0 #define VA_CONTROL_PAGE 1 #define PA_PGD 2 #define VA_PGD 3 #define PA_PUD_0 4 #define VA_PUD_0 5 #define PA_PMD_0 6 #define VA_PMD_0 7 #define PA_PTE_0 8 #define VA_PTE_0 9 #define PA_PUD_1 10 #define VA_PUD_1 11 #define PA_PMD_1 12 #define VA_PMD_1 13 #define PA_PTE_1 14 #define VA_PTE_1 15 #define PA_TABLE_PAGE 16 #define PAGES_NR 17 #ifndef __ASSEMBLY__ #include #include #include /* * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. * I.e. Maximum page that is mapped directly into kernel memory, * and kmap is not required. * * So far x86_64 is limited to 40 physical address bits. */ /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) /* Maximum address we can reach in physical address mode */ #define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) /* Maximum address we can use for the control pages */ #define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) /* Allocate one page for the pdp and the second for the code */ #define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_X86_64 #define MAX_NOTE_BYTES 1024 /* * Saving the registers of the cpu on which panic occured in * crash_kexec to save a valid sp. The registers of other cpus * will be saved in machine_crash_shutdown while shooting down them. */ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { if (oldregs) memcpy(newregs, oldregs, sizeof(*newregs)); else { __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx)); __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx)); __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx)); __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi)); __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi)); __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp)); __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax)); __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp)); __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags)); newregs->rip = (unsigned long)current_text_addr(); } } NORET_TYPE void relocate_kernel(unsigned long indirection_page, unsigned long page_list, unsigned long start_address) ATTRIB_NORET; /* Under Xen we need to work with machine addresses. These macros give the * machine address of a certain page to the generic kexec code instead of * the pseudo physical address which would be given by the default macros. 
*/ #ifdef CONFIG_XEN #define KEXEC_ARCH_HAS_PAGE_MACROS #define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page)) #define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn)) #define kexec_virt_to_phys(addr) virt_to_machine(addr) #define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr)) #endif #endif /* __ASSEMBLY__ */ #endif /* _X86_64_KEXEC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kmap_types.h000066400000000000000000000004121314037446600254470ustar00rootroot00000000000000#ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H enum km_type { KM_BOUNCE_READ, KM_SKB_SUNRPC_DATA, KM_SKB_DATA_SOFTIRQ, KM_USER0, KM_USER1, KM_BIO_SRC_IRQ, KM_BIO_DST_IRQ, KM_IRQ0, KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, KM_DUMP, KM_TYPE_NR, }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/kprobes.h000066400000000000000000000053521314037446600247500ustar00rootroot00000000000000#ifndef _ASM_KPROBES_H #define _ASM_KPROBES_H /* * Kernel Probes (KProbes) * include/asm-x86_64/kprobes.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2004-Oct Prasanna S Panchamukhi and Jim Keniston * kenistoj@us.ibm.com adopted from i386. */ #include #include #include #define __ARCH_WANT_KPROBES_INSN_SLOT struct pt_regs; struct kprobe; typedef u8 kprobe_opcode_t; #define BREAKPOINT_INSTRUCTION 0xcc #define MAX_INSN_SIZE 15 #define MAX_STACK_SIZE 64 #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ ? (MAX_STACK_SIZE) \ : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry #define ARCH_SUPPORTS_KRETPROBES void kretprobe_trampoline(void); extern void arch_remove_kprobe(struct kprobe *p); /* Architecture specific copy of original instruction*/ struct arch_specific_insn { /* copy of the original instruction */ kprobe_opcode_t *insn; }; struct prev_kprobe { struct kprobe *kp; unsigned long status; unsigned long old_rflags; unsigned long saved_rflags; }; /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned long kprobe_status; unsigned long kprobe_old_rflags; unsigned long kprobe_saved_rflags; long *jprobe_saved_rsp; struct pt_regs jprobe_saved_regs; kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; struct prev_kprobe prev_kprobe; }; /* trap3/1 are intr gates for kprobes. So, restore the status of IF, * if necessary, before executing the original int3/1 (trap) handler. 
*/ static inline void restore_interrupts(struct pt_regs *regs) { if (regs->eflags & IF_MASK) local_irq_enable(); } extern int post_kprobe_handler(struct pt_regs *regs); extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_handler(struct pt_regs *regs); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); #endif /* _ASM_KPROBES_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ldt.h000066400000000000000000000016151314037446600240640ustar00rootroot00000000000000/* * ldt.h * * Definitions of structures used with the modify_ldt system call. */ #ifndef _LINUX_LDT_H #define _LINUX_LDT_H /* Maximum number of LDT entries supported. */ #define LDT_ENTRIES 8192 /* The size of each LDT entry. */ #define LDT_ENTRY_SIZE 8 #ifndef __ASSEMBLY__ /* Note on 64bit base and limit is ignored and you cannot set DS/ES/CS not to the default values if you still want to do syscalls. This call is more for 32bit mode therefore. */ struct user_desc { unsigned int entry_number; unsigned int base_addr; unsigned int limit; unsigned int seg_32bit:1; unsigned int contents:2; unsigned int read_exec_only:1; unsigned int limit_in_pages:1; unsigned int seg_not_present:1; unsigned int useable:1; unsigned int lm:1; }; #define MODIFY_LDT_CONTENTS_DATA 0 #define MODIFY_LDT_CONTENTS_STACK 1 #define MODIFY_LDT_CONTENTS_CODE 2 #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/linkage.h000066400000000000000000000001261314037446600247070ustar00rootroot00000000000000#ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H /* Nothing to see here... */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/local.h000066400000000000000000000036301314037446600243720ustar00rootroot00000000000000#ifndef _ARCH_X8664_LOCAL_H #define _ARCH_X8664_LOCAL_H #include typedef struct { volatile unsigned int counter; } local_t; #define LOCAL_INIT(i) { (i) } #define local_read(v) ((v)->counter) #define local_set(v,i) (((v)->counter) = (i)) static __inline__ void local_inc(local_t *v) { __asm__ __volatile__( "incl %0" :"=m" (v->counter) :"m" (v->counter)); } static __inline__ void local_dec(local_t *v) { __asm__ __volatile__( "decl %0" :"=m" (v->counter) :"m" (v->counter)); } static __inline__ void local_add(unsigned int i, local_t *v) { __asm__ __volatile__( "addl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } static __inline__ void local_sub(unsigned int i, local_t *v) { __asm__ __volatile__( "subl %1,%0" :"=m" (v->counter) :"ir" (i), "m" (v->counter)); } /* On x86-64 these are better than the atomic variants on SMP kernels because they dont use a lock prefix. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i,l) local_add((i),(l)) #define __local_sub(i,l) local_sub((i),(l)) /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. * * This could be done better if we moved the per cpu data directly * after GS. 
*/ #define cpu_local_read(v) local_read(&__get_cpu_var(v)) #define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) #define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) #define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) #define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) #define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) #define __cpu_local_inc(v) cpu_local_inc(v) #define __cpu_local_dec(v) cpu_local_dec(v) #define __cpu_local_add(i, v) cpu_local_add((i), (v)) #define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) #endif /* _ARCH_I386_LOCAL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/000077500000000000000000000000001314037446600246255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/000077500000000000000000000000001314037446600254055ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/agp.h000066400000000000000000000030361314037446600263270ustar00rootroot00000000000000#ifndef AGP_H #define AGP_H 1 #include #include /* * Functions to keep the agpgart mappings coherent. * The GART gives the CPU a physical alias of memory. The alias is * mapped uncacheable. Make sure there are no conflicting mappings * with different cachability attributes for the same page. */ #define map_page_into_agp(page) ( \ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)) #define unmap_page_from_agp(page) ( \ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \ change_page_attr(page, 1, PAGE_KERNEL)) #define flush_agp_mappings() global_flush_tlb() /* Could use CLFLUSH here if the cpu supports it. But then it would need to be called for each cacheline of the whole page so it may not be worth it. Would need a page for it. */ #define flush_agp_cache() wbinvd() /* Convert a physical address to an address suitable for the GART. */ #define phys_to_gart(x) phys_to_machine(x) #define gart_to_phys(x) machine_to_phys(x) /* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ #define alloc_gatt_pages(order) ({ \ char *_t; dma_addr_t _d; \ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \ _t; }) #define free_gatt_pages(table, order) \ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/arch_hooks.h000066400000000000000000000012701314037446600276760ustar00rootroot00000000000000#ifndef _ASM_ARCH_HOOKS_H #define _ASM_ARCH_HOOKS_H #include /* * linux/include/asm/arch_hooks.h * * define the architecture specific hooks */ /* these aren't arch hooks, they are generic routines * that can be used by the hooks */ extern void init_ISA_irqs(void); extern void apic_intr_init(void); extern void smp_intr_init(void); extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); /* these are the defined hooks */ extern void intr_init_hook(void); extern void pre_intr_init_hook(void); extern void pre_setup_arch_hook(void); extern void trap_init_hook(void); extern void time_init_hook(void); extern void mca_nmi_hook(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/bootsetup.h000066400000000000000000000032261314037446600276050ustar00rootroot00000000000000 #ifndef _X86_64_BOOTSETUP_H #define _X86_64_BOOTSETUP_H 1 #define BOOT_PARAM_SIZE 4096 extern char x86_boot_params[BOOT_PARAM_SIZE]; /* * This is set up by the setup-routine at boot-time */ #define PARAM ((unsigned char *)x86_boot_params) #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) #define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0)) #define E820_MAP_NR (*(char*) (PARAM+E820NR)) #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) #define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) #define KERNEL_START (*(unsigned int *) (PARAM+0x214)) #define INITRD_START (__pa(xen_start_info->mod_start)) #define INITRD_SIZE (xen_start_info->mod_len) #define EDID_INFO (*(struct edid_info *) (PARAM+0x440)) #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) #define COMMAND_LINE boot_command_line #define RAMDISK_IMAGE_START_MASK 0x07FF #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/desc.h000066400000000000000000000145661314037446600265100ustar00rootroot00000000000000/* Written 2000 by Andi Kleen */ #ifndef __ARCH_DESC_H #define __ARCH_DESC_H #include #include #ifndef __ASSEMBLY__ #include #include #include #include // 8 byte segment descriptor struct desc_struct { u16 limit0; u16 base0; unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; } __attribute__((packed)); struct n_desc_struct { unsigned int a,b; }; enum { GATE_INTERRUPT = 0xE, GATE_TRAP = 0xF, GATE_CALL = 0xC, }; 
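/*
 * Minimal illustrative helpers (assumed names, not part of the original
 * interface): recover the flat base and limit packed into the 8-byte
 * desc_struct above. When the granularity bit 'g' is set the limit is
 * expressed in 4 KiB units.
 */
static inline unsigned long example_desc_base(const struct desc_struct *d)
{
	return d->base0 | ((unsigned long)d->base1 << 16) |
	       ((unsigned long)d->base2 << 24);
}

static inline unsigned long example_desc_limit(const struct desc_struct *d)
{
	unsigned long limit = d->limit0 | ((unsigned long)d->limit << 16);

	return d->g ? (limit << 12) | 0xfff : limit;
}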
// 16byte gate struct gate_struct { u16 offset_low; u16 segment; unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; u16 offset_middle; u32 offset_high; u32 zero1; } __attribute__((packed)); #define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) #define PTR_HIGH(x) ((unsigned long)(x) >> 32) enum { DESC_TSS = 0x9, DESC_LDT = 0x2, }; // LDT or TSS descriptor in the GDT. 16 bytes. struct ldttss_desc { u16 limit0; u16 base0; unsigned base1 : 8, type : 5, dpl : 2, p : 1; unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; u32 base3; u32 zero1; } __attribute__((packed)); struct desc_ptr { unsigned short size; unsigned long address; } __attribute__((packed)) ; extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS]; extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) static inline void clear_LDT(void) { int cpu = get_cpu(); /* * NB. We load the default_ldt for lcall7/27 handling on demand, as * it slows down context switching. Noone uses it anyway. */ cpu = cpu; /* XXX avoid compiler warning */ xen_set_ldt(NULL, 0); put_cpu(); } /* * This is the ldt that every process will get unless we need * something other than this. */ extern struct desc_struct default_ldt[]; #ifndef CONFIG_X86_NO_IDT extern struct gate_struct idt_table[]; #endif extern struct desc_ptr cpu_gdt_descr[]; /* the cpu gdt accessor */ #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) { struct gate_struct s; s.offset_low = PTR_LOW(func); s.segment = __KERNEL_CS; s.ist = ist; s.p = 1; s.dpl = dpl; s.zero0 = 0; s.zero1 = 0; s.type = type; s.offset_middle = PTR_MIDDLE(func); s.offset_high = PTR_HIGH(func); /* does not need to be atomic because it is only done once at setup time */ memcpy(adr, &s, 16); } #ifndef CONFIG_X86_NO_IDT static inline void set_intr_gate(int nr, void *func) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); } static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); } static inline void set_system_gate(int nr, void *func) { BUG_ON((unsigned)nr > 0xFF); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); } static inline void set_system_gate_ist(int nr, void *func, unsigned ist) { _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); } #endif static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, unsigned size) { struct ldttss_desc d; memset(&d,0,sizeof(d)); d.limit0 = size & 0xFFFF; d.base0 = PTR_LOW(tss); d.base1 = PTR_MIDDLE(tss) & 0xFF; d.type = type; d.p = 1; d.limit1 = (size >> 16) & 0xF; d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; d.base3 = PTR_HIGH(tss); memcpy(ptr, &d, 16); } #ifndef CONFIG_X86_NO_TSS static inline void set_tss_desc(unsigned cpu, void *addr) { /* * sizeof(unsigned long) coming from an extra "long" at the end * of the iobitmap. See tss_struct definition in processor.h * * -1? 
seg base+limit should be pointing to the address of the * last valid byte */ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], (unsigned long)addr, DESC_TSS, IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); } #endif static inline void set_ldt_desc(unsigned cpu, void *addr, int size) { set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr, DESC_LDT, size * 8 - 1); } static inline void set_seg_base(unsigned cpu, int entry, void *base) { struct desc_struct *d = &cpu_gdt(cpu)[entry]; u32 addr = (u32)(u64)base; BUG_ON((u64)base >> 32); d->base0 = addr & 0xffff; d->base1 = (addr >> 16) & 0xff; d->base2 = (addr >> 24) & 0xff; } #define LDT_entry_a(info) \ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) /* Don't allow setting of the lm bit. It is useless anyways because 64bit system calls require __USER_CS. */ #define LDT_entry_b(info) \ (((info)->base_addr & 0xff000000) | \ (((info)->base_addr & 0x00ff0000) >> 16) | \ ((info)->limit & 0xf0000) | \ (((info)->read_exec_only ^ 1) << 9) | \ ((info)->contents << 10) | \ (((info)->seg_not_present ^ 1) << 15) | \ ((info)->seg_32bit << 22) | \ ((info)->limit_in_pages << 23) | \ ((info)->useable << 20) | \ /* ((info)->lm << 21) | */ \ 0x7000) #define LDT_empty(info) (\ (info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0 && \ (info)->lm == 0) #if TLS_SIZE != 24 # error update this code. #endif static inline void load_TLS(struct thread_struct *t, unsigned int cpu) { #if 0 u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN); gdt[0] = t->tls_array[0]; gdt[1] = t->tls_array[1]; gdt[2] = t->tls_array[2]; #endif #define C(i) \ if (HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), \ t->tls_array[i])) \ BUG(); C(0); C(1); C(2); #undef C } /* * load one particular LDT into the current CPU */ static inline void load_LDT_nolock (mm_context_t *pc, int cpu) { void *segments = pc->ldt; int count = pc->size; if (likely(!count)) segments = NULL; xen_set_ldt(segments, count); } static inline void load_LDT(mm_context_t *pc) { int cpu = get_cpu(); load_LDT_nolock(pc, cpu); put_cpu(); } extern struct desc_ptr idt_descr; #endif /* !__ASSEMBLY__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/dma-mapping.h000066400000000000000000000143401314037446600277520ustar00rootroot00000000000000#ifndef _X8664_DMA_MAPPING_H #define _X8664_DMA_MAPPING_H 1 /* * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for * documentation. 
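 *
 * Typical streaming-DMA pattern built on the operations below (sketch;
 * 'dev', 'buf' and 'len' are hypothetical driver state):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... program the device with 'handle' as its bus address ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);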
*/ #include #include #include struct dma_mapping_ops { int (*mapping_error)(dma_addr_t dma_addr); void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); dma_addr_t (*map_single)(struct device *hwdev, void *ptr, size_t size, int direction); /* like map_single, but doesn't check the device mask */ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr, size_t size, int direction); void (*unmap_single)(struct device *dev, dma_addr_t addr, size_t size, int direction); void (*sync_single_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction); void (*sync_single_for_device)(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction); void (*sync_single_range_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction); void (*sync_single_range_for_device)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction); void (*sync_sg_for_cpu)(struct device *hwdev, struct scatterlist *sg, int nelems, int direction); void (*sync_sg_for_device)(struct device *hwdev, struct scatterlist *sg, int nelems, int direction); int (*map_sg)(struct device *hwdev, struct scatterlist *sg, int nents, int direction); void (*unmap_sg)(struct device *hwdev, struct scatterlist *sg, int nents, int direction); int (*dma_supported)(struct device *hwdev, u64 mask); int is_phys; }; extern dma_addr_t bad_dma_address; extern struct dma_mapping_ops* dma_ops; extern int iommu_merge; static inline int valid_dma_direction(int dma_direction) { return ((dma_direction == DMA_BIDIRECTIONAL) || (dma_direction == DMA_TO_DEVICE) || (dma_direction == DMA_FROM_DEVICE)); } #if 0 static inline int dma_mapping_error(dma_addr_t dma_addr) { if (dma_ops->mapping_error) return dma_ops->mapping_error(dma_addr); return (dma_addr == bad_dma_address); } extern void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_single(hwdev, ptr, size, direction); } static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_single(dev, addr, size, direction); } #define dma_map_page(dev,page,offset,size,dir) \ dma_map_single((dev), page_address(page)+(offset), (size), (dir)) #define dma_unmap_page dma_unmap_single static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_cpu) dma_ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_for_device) dma_ops->sync_single_for_device(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_cpu) { 
dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction); } flush_write_buffers(); } static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_single_range_for_device) dma_ops->sync_single_range_for_device(hwdev, dma_handle, offset, size, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_cpu) dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int direction) { BUG_ON(!valid_dma_direction(direction)); if (dma_ops->sync_sg_for_device) { dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction); } flush_write_buffers(); } static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { BUG_ON(!valid_dma_direction(direction)); return dma_ops->map_sg(hwdev, sg, nents, direction); } static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction) { BUG_ON(!valid_dma_direction(direction)); dma_ops->unmap_sg(hwdev, sg, nents, direction); } extern int dma_supported(struct device *hwdev, u64 mask); /* same for gart, swiotlb, and nommu */ static inline int dma_get_cache_alignment(void) { return boot_cpu_data.x86_clflush_size; } #define dma_is_consistent(h) 1 extern int dma_set_mask(struct device *dev, u64 mask); static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir) { flush_write_buffers(); } extern struct device fallback_dev; extern int panic_on_overflow; #endif #endif /* _X8664_DMA_MAPPING_H */ #include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/e820.h000066400000000000000000000040731314037446600262400ustar00rootroot00000000000000/* * structures and definitions for the int 15, ax=e820 memory map * scheme. * * In a nutshell, setup.S populates a scratch table in the * empty_zero_block that contains a list of usable address/size * duples. setup.c, this information is transferred into the e820map, * and in init.c/numa.c, that new information is used to mark pages * reserved or not. 
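 *
 * A sketch of walking the resulting map (using the e820 variable and
 * E820_RAM type declared below):
 *
 *	int i;
 *	for (i = 0; i < e820.nr_map; i++)
 *		if (e820.map[i].type == E820_RAM)
 *			printk(KERN_DEBUG "RAM %016Lx-%016Lx\n",
 *			       (unsigned long long)e820.map[i].addr,
 *			       (unsigned long long)(e820.map[i].addr +
 *						    e820.map[i].size));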
*/ #ifndef __E820_HEADER #define __E820_HEADER #include #define E820MAP 0x2d0 /* our map */ #define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 #define E820_RESERVED 2 #define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ #define E820_NVS 4 #define HIGH_MEMORY (1024*1024) #define LOWMEMSIZE() (0x9f000) #ifndef __ASSEMBLY__ struct e820entry { u64 addr; /* start of memory segment */ u64 size; /* size of memory segment */ u32 type; /* type of memory segment */ } __attribute__((packed)); struct e820map { int nr_map; struct e820entry map[E820MAX]; }; extern unsigned long find_e820_area(unsigned long start, unsigned long end, unsigned size); extern void add_memory_region(unsigned long start, unsigned long size, int type); extern void setup_memory_region(void); extern void contig_e820_setup(void); extern unsigned long e820_end_of_ram(void); extern void e820_reserve_resources(struct e820entry *e820, int nr_map); extern void e820_print_map(char *who); extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end); extern void e820_setup_gap(struct e820entry *e820, int nr_map); extern unsigned long e820_hole_size(unsigned long start_pfn, unsigned long end_pfn); extern void __init parse_memopt(char *p, char **end); extern void __init parse_memmapopt(char *p, char **end); extern struct e820map e820; extern unsigned ebda_addr, ebda_size; #endif/*!__ASSEMBLY__*/ #endif/*__E820_HEADER*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/fixmap.h000066400000000000000000000065771314037446600270610ustar00rootroot00000000000000/* * fixmap.h: compile-time virtual memory allocation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Ingo Molnar */ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H #include #include #include #include #include #include /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only * in the boot process. * * these 'compile-time allocated' memory buffers are * fixed-size 4k pages. (or larger if used with an increment * highger than 1) use fixmap_set(idx,phys) to associate * physical memory with fixmap indices. * * TLB entries of such buffers will not be flushed across * task switches. 
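 *
 * Usage sketch, roughly how the Xen shared-info page is mapped during
 * early setup (the exact call site may differ):
 *
 *	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
 *	HYPERVISOR_shared_info =
 *		(shared_info_t *)fix_to_virt(FIX_SHARED_INFO);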
*/ enum fixed_addresses { VSYSCALL_LAST_PAGE, VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, VSYSCALL_HPET, FIX_HPET_BASE, #ifndef CONFIG_XEN #ifdef CONFIG_X86_LOCAL_APIC FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ #endif #ifdef CONFIG_X86_IO_APIC FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, #endif #endif #ifdef CONFIG_ACPI FIX_ACPI_BEGIN, FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, #endif FIX_SHARED_INFO, #define NR_FIX_ISAMAPS 256 FIX_ISAMAP_END, FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1, __end_of_permanent_fixed_addresses, /* temporary boot-time mappings, used before ioremap() is functional */ #define NR_FIX_BTMAPS 16 FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, __end_of_fixed_addresses }; extern void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags); #define set_fixmap(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL) /* * Some hardware wants to get fixmapped without caching. */ #define set_fixmap_nocache(idx, phys) \ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) #define clear_fixmap(idx) \ __set_fixmap(idx, 0, __pgprot(0)) #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) /* Only covers 32bit vsyscalls currently. Need another set for 64bit. */ #define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) extern void __this_fixmap_does_not_exist(void); /* * 'index to address' translation. If anyone tries to use the idx * directly without translation, we catch the bug with a NULL-deference * kernel oops. Illegal ranges of incoming indices are caught too. */ static __always_inline unsigned long fix_to_virt(const unsigned int idx) { /* * this branch gets completely eliminated after inlining, * except when someone tries to use fixaddr indices in an * illegal way. (such as mixing up address types or using * out-of-range indices). * * If it doesn't get removed, the linker will complain * loudly with a reasonably clear error message.. */ if (idx >= __end_of_fixed_addresses) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/floppy.h000066400000000000000000000117241314037446600270740ustar00rootroot00000000000000/* * Architecture specific parts of the Floppy driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 * * Modifications for Xen are Copyright (c) 2004, Keir Fraser. */ #ifndef __ASM_XEN_X86_64_FLOPPY_H #define __ASM_XEN_X86_64_FLOPPY_H #include /* * The DMA channel used by the floppy controller cannot access data at * addresses >= 16MB * * Went back to the 1MB limit, as some people had problems with the floppy * driver otherwise. It doesn't matter much for performance anyway, as most * floppy accesses go through the track buffer. */ #define _CROSS_64KB(a,s,vdma) \ (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) /* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. 
*/ #include #undef MAX_DMA_ADDRESS #define MAX_DMA_ADDRESS 0 #define CROSS_64KB(a,s) (0) #define fd_inb(port) inb_p(port) #define fd_outb(value,port) outb_p(value,port) #define fd_request_dma() (0) #define fd_free_dma() ((void)0) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #define fd_get_dma_residue() vdma_get_dma_residue(FLOPPY_DMA) /* * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from * softirq context via motor_off_callback. A generic bug we happen to trigger. */ #define fd_dma_mem_alloc(size) __get_free_pages(GFP_KERNEL, get_order(size)) #define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) #define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io) static int virtual_dma_count; static int virtual_dma_residue; static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) { register unsigned char st; #undef TRACE_FLPY_INT #ifdef TRACE_FLPY_INT static int calls=0; static int bytes=0; static int dma_wait=0; #endif if (!doing_pdma) return floppy_interrupt(irq, dev_id, regs); #ifdef TRACE_FLPY_INT if(!calls) bytes = virtual_dma_count; #endif { register int lcount; register char *lptr; st = 1; for(lcount=virtual_dma_count, lptr=virtual_dma_addr; lcount; lcount--, lptr++) { st=inb(virtual_dma_port+4) & 0xa0 ; if(st != 0xa0) break; if(virtual_dma_mode) outb_p(*lptr, virtual_dma_port+5); else *lptr = inb_p(virtual_dma_port+5); } virtual_dma_count = lcount; virtual_dma_addr = lptr; st = inb(virtual_dma_port+4); } #ifdef TRACE_FLPY_INT calls++; #endif if(st == 0x20) return IRQ_HANDLED; if(!(st & 0x20)) { virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; #ifdef TRACE_FLPY_INT printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; dma_wait=0; #endif doing_pdma = 0; floppy_interrupt(irq, dev_id, regs); return IRQ_HANDLED; } #ifdef TRACE_FLPY_INT if(!virtual_dma_count) dma_wait++; #endif return IRQ_HANDLED; } static void fd_disable_dma(void) { doing_pdma = 0; virtual_dma_residue += virtual_dma_count; virtual_dma_count=0; } static int vdma_get_dma_residue(unsigned int dummy) { return virtual_dma_count + virtual_dma_residue; } static int fd_request_irq(void) { return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, "floppy", NULL); } #if 0 static unsigned long vdma_mem_alloc(unsigned long size) { return (unsigned long) vmalloc(size); } static void vdma_mem_free(unsigned long addr, unsigned long size) { vfree((void *)addr); } #endif static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) { doing_pdma = 1; virtual_dma_port = io; virtual_dma_mode = (mode == DMA_MODE_WRITE); virtual_dma_addr = addr; virtual_dma_count = size; virtual_dma_residue = 0; return 0; } /* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */ #define FDC1 xen_floppy_init() static int FDC2 = -1; static int xen_floppy_init(void) { use_virtual_dma = 1; can_use_virtual_dma = 1; return 0x3f0; } /* * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock * is needed to prevent corrupted CMOS RAM in case "insmod floppy" * coincides with another rtc CMOS user. Paul G. 
*/ #define FLOPPY0_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = (CMOS_READ(0x10) >> 4) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define FLOPPY1_TYPE ({ \ unsigned long flags; \ unsigned char val; \ spin_lock_irqsave(&rtc_lock, flags); \ val = CMOS_READ(0x10) & 15; \ spin_unlock_irqrestore(&rtc_lock, flags); \ val; \ }) #define N_FDC 2 #define N_DRIVE 8 #define FLOPPY_MOTOR_MASK 0xf0 #define EXTRA_FLOPPY_PARAMS #endif /* __ASM_XEN_X86_64_FLOPPY_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/gnttab_dma.h000066400000000000000000000000561314037446600276570ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/hw_irq.h000066400000000000000000000070311314037446600270500ustar00rootroot00000000000000#ifndef _ASM_HW_IRQ_H #define _ASM_HW_IRQ_H /* * linux/include/asm/hw_irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * moved some of the old arch/i386/kernel/irq.h to here. VY * * IRQ/IPI changes taken from work by Thomas Radke * * * hacked by Andi Kleen for x86-64. */ #ifndef __ASSEMBLY__ #include #include #include #include struct hw_interrupt_type; #endif /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define IA32_SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. */ #ifndef CONFIG_XEN #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe #define RESCHEDULE_VECTOR 0xfd #define CALL_FUNCTION_VECTOR 0xfc /* fb free - please don't readd KDB here because it's useless (hint - think what a NMI bit does to a vector) */ #define THERMAL_APIC_VECTOR 0xfa #define THRESHOLD_APIC_VECTOR 0xf9 /* f8 free */ #define INVALIDATE_TLB_VECTOR_END 0xf7 #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */ #define NUM_INVALIDATE_TLB_VECTORS 8 #endif /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. */ #define LOCAL_TIMER_VECTOR 0xef /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. 
(0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */ #ifndef __ASSEMBLY__ extern u8 irq_vector[NR_IRQ_VECTORS]; #define IO_APIC_VECTOR(irq) (irq_vector[irq]) #define AUTO_ASSIGN -1 /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c * * Interrupt entry/exit code at both C and assembly level */ extern void disable_8259A_irq(unsigned int irq); extern void enable_8259A_irq(unsigned int irq); extern int i8259A_irq_pending(unsigned int irq); extern void make_8259A_irq(unsigned int irq); extern void init_8259A(int aeoi); extern void FASTCALL(send_IPI_self(int vector)); extern void init_VISWS_APIC_irqs(void); extern void setup_IO_APIC(void); extern void disable_IO_APIC(void); #define print_IO_APIC() extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); extern void send_IPI(int dest, int vector); extern void setup_ioapic_dest(void); extern unsigned long io_apic_irqs; extern atomic_t irq_err_count; extern atomic_t irq_mis_count; #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) #define __STR(x) #x #define STR(x) __STR(x) #include #define IRQ_NAME2(nr) nr##_interrupt(void) #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) /* * SMP has a few special interrupts for IPI messages */ #define BUILD_IRQ(nr) \ asmlinkage void IRQ_NAME(nr); \ __asm__( \ "\n.p2align\n" \ "IRQ" #nr "_interrupt:\n\t" \ "push $~(" #nr ") ; " \ "jmp common_interrupt"); extern void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i); static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { resend_irq_on_evtchn(h, i); } #define platform_legacy_irq(irq) ((irq) < 16) #endif #endif /* _ASM_HW_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/hypercall.h000066400000000000000000000245351314037446600275520ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "add hypercall_stubs(%%rip),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ register long __arg5 asm("r8") = (long)(a5); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static 
inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry*/ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int 
__must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/hypervisor.h000066400000000000000000000160101314037446600277660ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) # ifdef CONFIG_X86_PAE # include # else # include # endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) # include #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. 
*/ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/io.h000066400000000000000000000244311314037446600261710ustar00rootroot00000000000000#ifndef _ASM_IO_H #define _ASM_IO_H #include /* * This file contains the definitions for the x86 IO instructions * inb/inw/inl/outb/outw/outl and the "string versions" of the same * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" * versions of the single-IO instructions (inb_p/inw_p/..). * * This file is not meant to be obfuscating: it's just complicated * to (a) handle it all in a way that makes gcc able to optimize it * as well as possible and (b) trying to avoid writing the same thing * over and over again with slight variations and possibly making a * mistake somewhere. 
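 *
 * The macros below expand to the usual port accessors; a small usage
 * sketch (CMOS/RTC index and data ports chosen only as an example:
 * outb() selects register 0x0a, inb_p() reads it back with an I/O delay):
 *
 *	unsigned char v;
 *
 *	outb(0x0a, 0x70);
 *	v = inb_p(0x71);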
*/ /* * Thanks to James van Artsdalen for a better timing-fix than * the two short jumps: using outb's to a nonexistent port seems * to guarantee better timings even on fast machines. * * On the other hand, I'd like to be sure of a non-existent port: * I feel a bit unsafe about using 0x80 (should be safe, though) * * Linus */ /* * Bit simplified and optimized by Jan Hubicka * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. * * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo */ #define __SLOW_DOWN_IO "\noutb %%al,$0x80" #ifdef REALLY_SLOW_IO #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO #else #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO #endif /* * Talk about misusing macros.. */ #define __OUT1(s,x) \ static inline void out##s(unsigned x value, unsigned short port) { #define __OUT2(s,s1,s2) \ __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" #define __OUT(s,s1,x) \ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ #define __IN1(s) \ static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; #define __IN2(s,s1,s2) \ __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" #define __IN(s,s1,i...) \ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ #define __INS(s) \ static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ { __asm__ __volatile__ ("rep ; ins" #s \ : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } #define __OUTS(s) \ static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ { __asm__ __volatile__ ("rep ; outs" #s \ : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } #define RETURN_TYPE unsigned char __IN(b,"") #undef RETURN_TYPE #define RETURN_TYPE unsigned short __IN(w,"") #undef RETURN_TYPE #define RETURN_TYPE unsigned int __IN(l,"") #undef RETURN_TYPE __OUT(b,"b",char) __OUT(w,"w",short) __OUT(l,,int) __INS(b) __INS(w) __INS(l) __OUTS(b) __OUTS(w) __OUTS(l) #define IO_SPACE_LIMIT 0xffff #if defined(__KERNEL__) && __x86_64__ #include #ifndef __i386__ /* * Change virtual addresses to physical addresses and vv. * These are pretty trivial */ static inline unsigned long virt_to_phys(volatile void * address) { return __pa(address); } static inline void * phys_to_virt(unsigned long address) { return __va(address); } #define virt_to_bus(_x) phys_to_machine(__pa(_x)) #define bus_to_virt(_x) __va(machine_to_phys(_x)) #endif /* * Change "struct page" to physical address. 
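 *
 * [Editor's note] Looking back at the port-I/O helpers generated above
 * (inb/outb and friends), a small usage sketch. The CMOS index/data ports
 * 0x70/0x71 are the classic example; whether a paravirtualized guest is
 * actually allowed to touch them depends on how Xen filters I/O ports, so
 * treat this purely as an illustration of the call signatures.
 */
static inline unsigned char uvp_read_cmos_seconds_example(void)
{
        outb(0x00, 0x70);       /* select CMOS register 0 (seconds) */
        return inb(0x71);       /* read the selected register back */
}
/* (end of inserted example)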
*/ #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) #define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page))) #define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page))) #define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \ (unsigned long) bio_offset((bio))) #define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \ (unsigned long) (bv)->bv_offset) #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \ bvec_to_pseudophys((vec2)))) #include extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); static inline void __iomem * ioremap (unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } extern void *bt_ioremap(unsigned long addr, unsigned long size); extern void bt_iounmap(void *addr, unsigned long size); #define early_ioremap bt_ioremap #define early_iounmap bt_iounmap /* * This one maps high address device memory and turns off caching for that area. * it's useful if some control registers are in such an area and write combining * or read caching is not desirable: */ extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap(volatile void __iomem *addr); /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ #define isa_virt_to_bus(_x) ({ BUG(); virt_to_bus(_x); }) #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x #define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x)) /* * However PCI ones are not necessarily 1:1 and therefore these interfaces * are forbidden in portable PCI drivers. * * Allow them on x86 for legacy drivers, though. */ #define virt_to_bus(_x) phys_to_machine(__pa(_x)) #define bus_to_virt(_x) __va(machine_to_phys(_x)) /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the * memory location directly. 
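 *
 * [Editor's note] Before the readX/writeX accessors are defined: under Xen
 * the address-space split set up above matters. page_to_pseudophys() yields
 * the guest-physical ("pseudo-physical") address, while page_to_phys() and
 * page_to_bus() go through phys_to_machine() and yield the machine address
 * that devices and the hypervisor actually see. A tiny hypothetical helper
 * just to make the two values visible side by side:
 */
static inline void uvp_page_addrs_example(struct page *pg,
                                          dma_addr_t *pseudo, dma_addr_t *machine)
{
        *pseudo  = page_to_pseudophys(pg);      /* guest-physical frame address */
        *machine = page_to_phys(pg);            /* machine address (DMA / Xen view) */
}
/* (end of inserted example)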
*/ static inline __u8 __readb(const volatile void __iomem *addr) { return *(__force volatile __u8 *)addr; } static inline __u16 __readw(const volatile void __iomem *addr) { return *(__force volatile __u16 *)addr; } static inline __u32 __readl(const volatile void __iomem *addr) { return *(__force volatile __u32 *)addr; } static inline __u64 __readq(const volatile void __iomem *addr) { return *(__force volatile __u64 *)addr; } #define readb(x) __readb(x) #define readw(x) __readw(x) #define readl(x) __readl(x) #define readq(x) __readq(x) #define readb_relaxed(a) readb(a) #define readw_relaxed(a) readw(a) #define readl_relaxed(a) readl(a) #define readq_relaxed(a) readq(a) #define __raw_readb readb #define __raw_readw readw #define __raw_readl readl #define __raw_readq readq #define mmiowb() #ifdef CONFIG_UNORDERED_IO static inline void __writel(__u32 val, volatile void __iomem *addr) { volatile __u32 __iomem *target = addr; asm volatile("movnti %1,%0" : "=m" (*target) : "r" (val) : "memory"); } static inline void __writeq(__u64 val, volatile void __iomem *addr) { volatile __u64 __iomem *target = addr; asm volatile("movnti %1,%0" : "=m" (*target) : "r" (val) : "memory"); } #else static inline void __writel(__u32 b, volatile void __iomem *addr) { *(__force volatile __u32 *)addr = b; } static inline void __writeq(__u64 b, volatile void __iomem *addr) { *(__force volatile __u64 *)addr = b; } #endif static inline void __writeb(__u8 b, volatile void __iomem *addr) { *(__force volatile __u8 *)addr = b; } static inline void __writew(__u16 b, volatile void __iomem *addr) { *(__force volatile __u16 *)addr = b; } #define writeq(val,addr) __writeq((val),(addr)) #define writel(val,addr) __writel((val),(addr)) #define writew(val,addr) __writew((val),(addr)) #define writeb(val,addr) __writeb((val),(addr)) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel #define __raw_writeq writeq void __memcpy_fromio(void*,unsigned long,unsigned); void __memcpy_toio(unsigned long,const void*,unsigned); static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) { __memcpy_fromio(to,(unsigned long)from,len); } static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) { __memcpy_toio((unsigned long)to,from,len); } void memset_io(volatile void __iomem *a, int b, size_t c); /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN))) #define isa_readb(a) readb(__ISA_IO_base + (a)) #define isa_readw(a) readw(__ISA_IO_base + (a)) #define isa_readl(a) readl(__ISA_IO_base + (a)) #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) /* * Again, x86-64 does not require mem IO specific function. 
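 *
 * [Editor's note] A sketch of the usual ioremap()/readl()/writel() pattern
 * using the accessors defined above. The BAR address, length and register
 * offsets are hypothetical device properties, not anything defined by this
 * header.
 */
static inline __u32 uvp_read_device_status_example(unsigned long bar_phys,
                                                   unsigned long bar_len)
{
        void __iomem *regs = ioremap_nocache(bar_phys, bar_len);
        __u32 status;

        if (!regs)
                return ~0u;
        status = readl(regs + 0x10);    /* hypothetical STATUS register */
        writel(0x1, regs + 0x14);       /* hypothetical ACK register */
        iounmap(regs);
        return status;
}
/* (end of inserted example)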
*/ #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d)) #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d)) /** * check_signature - find BIOS signatures * @io_addr: mmio address to check * @signature: signature block * @length: length of signature * * Perform a signature comparison with the mmio address io_addr. This * address should have been obtained by ioremap. * Returns 1 on a match. */ static inline int check_signature(void __iomem *io_addr, const unsigned char *signature, int length) { int retval = 0; do { if (readb(io_addr) != *signature) goto out; io_addr++; signature++; length--; } while (length); retval = 1; out: return retval; } /* Nothing to do */ #define dma_cache_inv(_start,_size) do { } while (0) #define dma_cache_wback(_start,_size) do { } while (0) #define dma_cache_wback_inv(_start,_size) do { } while (0) #define flush_write_buffers() extern int iommu_bio_merge; #define BIO_VMERGE_BOUNDARY iommu_bio_merge /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ #define xlate_dev_mem_ptr(p) __va(p) /* * Convert a virtual cached pointer to an uncached pointer */ #define xlate_dev_kmem_ptr(p) p #endif /* __KERNEL__ */ #define ARCH_HAS_DEV_MEM #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/irq.h000066400000000000000000000012361314037446600263530ustar00rootroot00000000000000#ifndef _ASM_IRQ_H #define _ASM_IRQ_H /* * linux/include/asm/irq.h * * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar * * IRQ/IPI changes taken from work by Thomas Radke * */ #include /* include comes from machine specific directory */ #include "irq_vectors.h" #include static __inline__ int irq_canonicalize(int irq) { return ((irq == 2) ? 9 : irq); } #define KDB_VECTOR 0xf9 # define irq_ctx_init(cpu) do { } while (0) #ifdef CONFIG_HOTPLUG_CPU #include extern void fixup_irqs(cpumask_t map); #endif #define __ARCH_HAS_DO_SOFTIRQ 1 #endif /* _ASM_IRQ_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/maddr.h000066400000000000000000000114541314037446600266520ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). 
*/ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ 
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/mmu.h000066400000000000000000000015231314037446600263550ustar00rootroot00000000000000#ifndef __x86_64_MMU_H #define __x86_64_MMU_H #include #include /* * The x86_64 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { void *ldt; rwlock_t ldtlock; int size; struct semaphore sem; #ifdef CONFIG_XEN unsigned pinned:1; unsigned has_foreign_mappings:1; struct list_head unpinned; #endif } mm_context_t; #ifdef CONFIG_XEN extern struct list_head mm_unpinned; extern spinlock_t mm_unpinned_lock; /* mm/memory.c:exit_mmap hook */ extern void _arch_exit_mmap(struct mm_struct *mm); #define arch_exit_mmap(_mm) _arch_exit_mmap(_mm) /* kernel/fork.c:dup_mmap hook */ extern void _arch_dup_mmap(struct mm_struct *mm); #define arch_dup_mmap(mm, oldmm) ((void)(oldmm), _arch_dup_mmap(mm)) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/mmu_context.h000066400000000000000000000070601314037446600301230ustar00rootroot00000000000000#ifndef __X86_64_MMU_CONTEXT_H #define __X86_64_MMU_CONTEXT_H #include #include #include #include #include #include #include /* * possibly do the LDT unload here? */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) if (read_pda(mmu_state) == TLBSTATE_OK) write_pda(mmu_state, TLBSTATE_LAZY); #endif } #define prepare_arch_switch(next) __prepare_arch_switch() static inline void __prepare_arch_switch(void) { /* * Save away %es, %ds, %fs and %gs. Must happen before reload * of cr3/ldt (i.e., not in __switch_to). 
*/ __asm__ __volatile__ ( "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3" : "=m" (current->thread.es), "=m" (current->thread.ds), "=m" (current->thread.fsindex), "=m" (current->thread.gsindex) ); if (current->thread.ds) __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) ); if (current->thread.es) __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) ); if (current->thread.fsindex) { __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) ); current->thread.fs = 0; } if (current->thread.gsindex) { load_gs_index(0); current->thread.gs = 0; } } extern void mm_pin(struct mm_struct *mm); extern void mm_unpin(struct mm_struct *mm); void mm_pin_all(void); static inline void load_cr3(pgd_t *pgd) { asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) : "memory"); } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); struct mmuext_op _op[3], *op = _op; if (likely(prev != next)) { BUG_ON(!xen_feature(XENFEAT_writable_page_tables) && !next->context.pinned); /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) write_pda(mmu_state, TLBSTATE_OK); write_pda(active_mm, next); #endif set_bit(cpu, &next->cpu_vm_mask); /* load_cr3(next->pgd) */ op->cmd = MMUEXT_NEW_BASEPTR; op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT); op++; /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */ op->cmd = MMUEXT_NEW_USER_BASEPTR; op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT); op++; if (unlikely(next->context.ldt != prev->context.ldt)) { /* load_LDT_nolock(&next->context, cpu) */ op->cmd = MMUEXT_SET_LDT; op->arg1.linear_addr = (unsigned long)next->context.ldt; op->arg2.nr_ents = next->context.size; op++; } BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF)); } #if defined(CONFIG_SMP) && !defined(CONFIG_XEN) else { write_pda(mmu_state, TLBSTATE_OK); if (read_pda(active_mm) != next) out_of_line_bug(); if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. 
*/ load_cr3(next->pgd); xen_new_user_pt(__pa(__user_pgd(next->pgd))); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk,mm) do { \ load_gs_index(0); \ asm volatile("movl %0,%%fs"::"r"(0)); \ } while(0) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { if (!next->context.pinned) mm_pin(next); switch_mm(prev, next, NULL); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/msr.h000066400000000000000000000270131314037446600263620ustar00rootroot00000000000000#ifndef X86_64_MSR_H #define X86_64_MSR_H 1 #ifndef __ASSEMBLY__ /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ #define rdmsr(msr,val1,val2) \ __asm__ __volatile__("rdmsr" \ : "=a" (val1), "=d" (val2) \ : "c" (msr)) #define rdmsrl(msr,val) do { unsigned long a__,b__; \ __asm__ __volatile__("rdmsr" \ : "=a" (a__), "=d" (b__) \ : "c" (msr)); \ val = a__ | (b__<<32); \ } while(0) #define wrmsr(msr,val1,val2) \ __asm__ __volatile__("wrmsr" \ : /* no outputs */ \ : "c" (msr), "a" (val1), "d" (val2)) #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) /* wrmsr with exception handling */ #define wrmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: wrmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 8\n\t" \ " .quad 2b,3b\n\t" \ ".previous" \ : "=a" (ret__) \ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ ret__; }) #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) #define rdmsr_safe(msr,a,b) \ ({ int ret__; \ asm volatile ("1: rdmsr\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %4,%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 8\n" \ " .quad 1b,3b\n" \ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\ :"c"(msr), "i"(-EIO), "0"(0)); \ ret__; }) #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) #define rdtscl(low) \ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") #define rdtscll(val) do { \ unsigned int __a,__d; \ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ } while(0) #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ : "c" (counter)) static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__(XEN_CPUID : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__(XEN_CPUID : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__(XEN_CPUID : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__(XEN_CPUID : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned 
int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__(XEN_CPUID : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define MSR_IA32_UCODE_WRITE 0x79 #define MSR_IA32_UCODE_REV 0x8b #endif /* AMD/K8 specific MSRs */ #define MSR_EFER 0xc0000080 /* extended feature register */ #define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ #define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ #define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ /* EFER bits: */ #define _EFER_SCE 0 /* SYSCALL/SYSRET */ #define _EFER_LME 8 /* Long mode enable */ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_TSC 0x10 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_PERFCTR0 0xc1 #define MSR_IA32_PERFCTR1 0xc2 #define MSR_MTRRcap 0x0fe #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_IA32_MCG_CAP 0x179 #define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MCG_CTL 0x17b #define MSR_IA32_EVNTSEL0 0x186 #define MSR_IA32_EVNTSEL1 0x187 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_MTRRfix64K_00000 0x250 #define MSR_MTRRfix16K_80000 0x258 #define MSR_MTRRfix16K_A0000 0x259 #define MSR_MTRRfix4K_C0000 0x268 #define MSR_MTRRfix4K_C8000 0x269 #define MSR_MTRRfix4K_D0000 0x26a #define MSR_MTRRfix4K_D8000 0x26b #define MSR_MTRRfix4K_E0000 0x26c #define MSR_MTRRfix4K_E8000 0x26d #define MSR_MTRRfix4K_F0000 0x26e #define MSR_MTRRfix4K_F8000 0x26f #define MSR_MTRRdefType 0x2ff #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 #define MSR_P6_PERFCTR0 0xc1 #define MSR_P6_PERFCTR1 0xc2 #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 /* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. */ #define MSR_K7_EVNTSEL0 0xC0010000 #define MSR_K7_PERFCTR0 0xC0010004 #define MSR_K7_EVNTSEL1 0xC0010001 #define MSR_K7_PERFCTR1 0xC0010005 #define MSR_K7_EVNTSEL2 0xC0010002 #define MSR_K7_PERFCTR2 0xC0010006 #define MSR_K7_EVNTSEL3 0xC0010003 #define MSR_K7_PERFCTR3 0xC0010007 #define MSR_K8_TOP_MEM1 0xC001001A #define MSR_K8_TOP_MEM2 0xC001001D #define MSR_K8_SYSCFG 0xC0010010 #define MSR_K8_HWCR 0xC0010015 /* K6 MSRs */ #define MSR_K6_EFER 0xC0000080 #define MSR_K6_STAR 0xC0000081 #define MSR_K6_WHCR 0xC0000082 #define MSR_K6_UWCCR 0xC0000085 #define MSR_K6_PSOR 0xC0000087 #define MSR_K6_PFIR 0xC0000088 /* Centaur-Hauls/IDT defined MSRs. 
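 *
 * [Editor's note] Before the vendor-specific lists continue, a brief usage
 * sketch for the accessors defined at the top of this header. The helper
 * names are hypothetical; under Xen, privileged MSR writes generally trap to
 * the hypervisor, so only reads are shown.
 */
static inline int uvp_nx_enabled_example(void)
{
        unsigned long efer;

        rdmsrl(MSR_EFER, efer);         /* rdmsr plus merge of EDX:EAX into 64 bits */
        return !!(efer & EFER_NX);
}

static inline unsigned long uvp_read_tsc_example(void)
{
        unsigned long tsc;

        rdtscll(tsc);                   /* full 64-bit time-stamp counter */
        return tsc;
}
/* (end of inserted example)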
*/ #define MSR_IDT_FCR1 0x107 #define MSR_IDT_FCR2 0x108 #define MSR_IDT_FCR3 0x109 #define MSR_IDT_FCR4 0x10a #define MSR_IDT_MCR0 0x110 #define MSR_IDT_MCR1 0x111 #define MSR_IDT_MCR2 0x112 #define MSR_IDT_MCR3 0x113 #define MSR_IDT_MCR4 0x114 #define MSR_IDT_MCR5 0x115 #define MSR_IDT_MCR6 0x116 #define MSR_IDT_MCR7 0x117 #define MSR_IDT_MCR_CTRL 0x120 /* VIA Cyrix defined MSRs*/ #define MSR_VIA_FCR 0x1107 #define MSR_VIA_LONGHAUL 0x110a #define MSR_VIA_RNG 0x110b #define MSR_VIA_BCR2 0x1147 /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0 #define MSR_IA32_P5_MC_TYPE 1 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_EBL_CR_POWERON 0x2a #define MSR_IA32_APICBASE 0x1b #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) /* P4/Xeon+ specific */ #define MSR_IA32_MCG_EAX 0x180 #define MSR_IA32_MCG_EBX 0x181 #define MSR_IA32_MCG_ECX 0x182 #define MSR_IA32_MCG_EDX 0x183 #define MSR_IA32_MCG_ESI 0x184 #define MSR_IA32_MCG_EDI 0x185 #define MSR_IA32_MCG_EBP 0x186 #define MSR_IA32_MCG_ESP 0x187 #define MSR_IA32_MCG_EFLAGS 0x188 #define MSR_IA32_MCG_EIP 0x189 #define MSR_IA32_MCG_RESERVED 0x18A #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_PERF_CTL 0x199 #define MSR_IA32_MPERF 0xE7 #define MSR_IA32_APERF 0xE8 #define MSR_IA32_THERM_CONTROL 0x19a #define MSR_IA32_THERM_INTERRUPT 0x19b #define MSR_IA32_THERM_STATUS 0x19c #define MSR_IA32_MISC_ENABLE 0x1a0 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 /* Pentium IV performance counter MSRs */ #define MSR_P4_BPU_PERFCTR0 0x300 #define MSR_P4_BPU_PERFCTR1 0x301 #define MSR_P4_BPU_PERFCTR2 0x302 #define MSR_P4_BPU_PERFCTR3 0x303 #define MSR_P4_MS_PERFCTR0 0x304 #define MSR_P4_MS_PERFCTR1 0x305 #define MSR_P4_MS_PERFCTR2 0x306 #define MSR_P4_MS_PERFCTR3 0x307 #define MSR_P4_FLAME_PERFCTR0 0x308 #define MSR_P4_FLAME_PERFCTR1 0x309 #define MSR_P4_FLAME_PERFCTR2 0x30a #define MSR_P4_FLAME_PERFCTR3 0x30b #define MSR_P4_IQ_PERFCTR0 0x30c #define MSR_P4_IQ_PERFCTR1 0x30d #define MSR_P4_IQ_PERFCTR2 0x30e #define MSR_P4_IQ_PERFCTR3 0x30f #define MSR_P4_IQ_PERFCTR4 0x310 #define MSR_P4_IQ_PERFCTR5 0x311 #define MSR_P4_BPU_CCCR0 0x360 #define MSR_P4_BPU_CCCR1 0x361 #define MSR_P4_BPU_CCCR2 0x362 #define MSR_P4_BPU_CCCR3 0x363 #define MSR_P4_MS_CCCR0 0x364 #define MSR_P4_MS_CCCR1 0x365 #define MSR_P4_MS_CCCR2 0x366 #define MSR_P4_MS_CCCR3 0x367 #define MSR_P4_FLAME_CCCR0 0x368 #define MSR_P4_FLAME_CCCR1 0x369 #define MSR_P4_FLAME_CCCR2 0x36a #define MSR_P4_FLAME_CCCR3 0x36b #define MSR_P4_IQ_CCCR0 0x36c #define MSR_P4_IQ_CCCR1 0x36d #define MSR_P4_IQ_CCCR2 0x36e #define MSR_P4_IQ_CCCR3 0x36f #define MSR_P4_IQ_CCCR4 0x370 #define MSR_P4_IQ_CCCR5 0x371 #define MSR_P4_ALF_ESCR0 0x3ca #define MSR_P4_ALF_ESCR1 0x3cb #define MSR_P4_BPU_ESCR0 0x3b2 #define MSR_P4_BPU_ESCR1 0x3b3 #define MSR_P4_BSU_ESCR0 0x3a0 #define MSR_P4_BSU_ESCR1 0x3a1 #define MSR_P4_CRU_ESCR0 0x3b8 #define MSR_P4_CRU_ESCR1 0x3b9 #define MSR_P4_CRU_ESCR2 0x3cc #define MSR_P4_CRU_ESCR3 0x3cd #define MSR_P4_CRU_ESCR4 0x3e0 #define MSR_P4_CRU_ESCR5 0x3e1 #define MSR_P4_DAC_ESCR0 0x3a8 #define MSR_P4_DAC_ESCR1 0x3a9 #define MSR_P4_FIRM_ESCR0 0x3a4 #define MSR_P4_FIRM_ESCR1 0x3a5 #define 
MSR_P4_FLAME_ESCR0 0x3a6 #define MSR_P4_FLAME_ESCR1 0x3a7 #define MSR_P4_FSB_ESCR0 0x3a2 #define MSR_P4_FSB_ESCR1 0x3a3 #define MSR_P4_IQ_ESCR0 0x3ba #define MSR_P4_IQ_ESCR1 0x3bb #define MSR_P4_IS_ESCR0 0x3b4 #define MSR_P4_IS_ESCR1 0x3b5 #define MSR_P4_ITLB_ESCR0 0x3b6 #define MSR_P4_ITLB_ESCR1 0x3b7 #define MSR_P4_IX_ESCR0 0x3c8 #define MSR_P4_IX_ESCR1 0x3c9 #define MSR_P4_MOB_ESCR0 0x3aa #define MSR_P4_MOB_ESCR1 0x3ab #define MSR_P4_MS_ESCR0 0x3c0 #define MSR_P4_MS_ESCR1 0x3c1 #define MSR_P4_PMH_ESCR0 0x3ac #define MSR_P4_PMH_ESCR1 0x3ad #define MSR_P4_RAT_ESCR0 0x3bc #define MSR_P4_RAT_ESCR1 0x3bd #define MSR_P4_SAAT_ESCR0 0x3ae #define MSR_P4_SAAT_ESCR1 0x3af #define MSR_P4_SSU_ESCR0 0x3be #define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ #define MSR_P4_TBPU_ESCR0 0x3c2 #define MSR_P4_TBPU_ESCR1 0x3c3 #define MSR_P4_TC_ESCR0 0x3c4 #define MSR_P4_TC_ESCR1 0x3c5 #define MSR_P4_U2L_ESCR0 0x3b0 #define MSR_P4_U2L_ESCR1 0x3b1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/nmi.h000066400000000000000000000031301314037446600263360ustar00rootroot00000000000000/* * linux/include/asm-i386/nmi.h */ #ifndef ASM_NMI_H #define ASM_NMI_H #include #include struct pt_regs; typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); /** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. */ void set_nmi_callback(nmi_callback_t callback); /** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); #ifdef CONFIG_PM /** Replace the PM callback routine for NMI. */ struct pm_dev * set_nmi_pm_callback(pm_callback callback); /** Unset the PM callback routine back to the default. */ void unset_nmi_pm_callback(struct pm_dev * dev); #else static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) { return 0; } static inline void unset_nmi_pm_callback(struct pm_dev * dev) { } #endif /* CONFIG_PM */ extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs); static inline unsigned char get_nmi_reason(void) { shared_info_t *s = HYPERVISOR_shared_info; unsigned char reason = 0; /* construct a value which looks like it came from * port 0x61. */ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason)) reason |= 0x40; if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason)) reason |= 0x80; return reason; } extern int panic_on_timeout; extern int unknown_nmi_panic; #ifndef CONFIG_XEN extern int check_nmi_watchdog(void); #endif #endif /* ASM_NMI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/page.h000066400000000000000000000137131314037446600264770ustar00rootroot00000000000000#ifndef _X86_64_PAGE_H #define _X86_64_PAGE_H /* #include */ #ifndef __ASSEMBLY__ #include #include #include #endif #include /* * Need to repeat this here in order to not include pgtable.h (which in turn * depends on definitions made here), but to be able to use the symbolic * below. The preprocessor will warn if the two definitions aren't identical. */ #define _PAGE_PRESENT 0x001 /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #ifdef __ASSEMBLY__ #define PAGE_SIZE (0x1 << PAGE_SHIFT) #else #define PAGE_SIZE (1UL << PAGE_SHIFT) #endif #define PAGE_MASK (~(PAGE_SIZE-1)) /* See Documentation/x86_64/mm.txt for a description of the memory map. 
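 *
 * [Editor's note] Before the memory-map constants below: a minimal sketch of
 * the NMI callback interface declared in nmi.h just above in this archive.
 * The handler and installer names are hypothetical, and the sketch assumes
 * the normal in-kernel printk() facilities are available.
 */
static int uvp_nmi_handler_example(struct pt_regs *regs, int cpu)
{
        unsigned char reason = get_nmi_reason();

        if (reason & 0x80)
                printk(KERN_ERR "NMI: parity error (cpu %d)\n", cpu);
        if (reason & 0x40)
                printk(KERN_ERR "NMI: I/O check error (cpu %d)\n", cpu);

        return 1;       /* 1 => the NMI was handled here */
}

static void uvp_install_nmi_handler_example(void)
{
        set_nmi_callback(uvp_nmi_handler_example);      /* only one handler may be set */
}
/* (end of inserted example)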
*/ #define __PHYSICAL_MASK_SHIFT 46 #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define CURRENT_MASK (~(THREAD_SIZE-1)) #define EXCEPTION_STACK_ORDER 0 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) #define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) #define IRQSTACK_ORDER 2 #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) #define STACKFAULT_STACK 1 #define DOUBLEFAULT_STACK 2 #define NMI_STACK 3 #define DEBUG_STACK 4 #define MCE_STACK 5 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #ifdef __KERNEL__ #ifndef __ASSEMBLY__ extern unsigned long end_pfn; #include void clear_page(void *); void copy_page(void *, void *); #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * These are used to make use of C type-checking.. */ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pud; } pud_t; typedef struct { unsigned long pgd; } pgd_t; #define PTE_MASK PHYSICAL_PAGE_MASK typedef struct { unsigned long pgprot; } pgprot_t; #define __pte_val(x) ((x).pte) #define pte_val(x) ((__pte_val(x) & _PAGE_PRESENT) ? 
\ pte_machine_to_phys(__pte_val(x)) : \ __pte_val(x)) #define __pmd_val(x) ((x).pmd) static inline unsigned long pmd_val(pmd_t x) { unsigned long ret = __pmd_val(x); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT; #else if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); #endif return ret; } #define __pud_val(x) ((x).pud) static inline unsigned long pud_val(pud_t x) { unsigned long ret = __pud_val(x); if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define __pgd_val(x) ((x).pgd) static inline unsigned long pgd_val(pgd_t x) { unsigned long ret = __pgd_val(x); if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret); return ret; } #define pgprot_val(x) ((x).pgprot) static inline pte_t __pte(unsigned long x) { if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x); return ((pte_t) { (x) }); } static inline pmd_t __pmd(unsigned long x) { if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x); return ((pmd_t) { (x) }); } static inline pud_t __pud(unsigned long x) { if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x); return ((pud_t) { (x) }); } static inline pgd_t __pgd(unsigned long x) { if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x); return ((pgd_t) { (x) }); } #define __pgprot(x) ((pgprot_t) { (x) } ) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000UL #define __PAGE_OFFSET 0xffff880000000000UL #else #define __PHYSICAL_START CONFIG_PHYSICAL_START #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000 #define __PAGE_OFFSET 0xffff880000000000 #endif /* !__ASSEMBLY__ */ #if CONFIG_XEN_COMPAT <= 0x030002 #undef LOAD_OFFSET #define LOAD_OFFSET 0 #endif /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) #define KERNEL_TEXT_SIZE (40UL*1024*1024) #define KERNEL_TEXT_START 0xffffffff80000000UL #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. Otherwise you risk miscompilation. */ #define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET) /* __pa_symbol should be used for C visible symbols. This seems to be the official gcc blessed way to do such arithmetic. */ #define __pa_symbol(x) \ ({unsigned long v; \ asm("" : "=r" (v) : "0" (x)); \ __pa(v); }) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define __boot_va(x) __va(x) #define __boot_pa(x) __pa(x) #ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < end_pfn) #endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #include #endif /* _X86_64_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/pci.h000066400000000000000000000111771314037446600263400ustar00rootroot00000000000000#ifndef __x8664_PCI_H #define __x8664_PCI_H #include #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #include #define pcibios_scan_all_fns(a, b) (!is_initial_xendomain()) extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); #include #include #include #include #include #include /* for have_iommu */ extern int iommu_setup(char *opt); /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions * * On AMD64 it mostly equals, but we set it to zero if a hardware * IOMMU (gart) of sotware IOMMU (swiotlb) is available. */ #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) #if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU) /* * x86-64 always supports DAC, but sometimes it is useful to force * devices through the IOMMU to get automatic sg list merging. * Optional right now. 
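 *
 * [Editor's note] A quick sketch (helper name hypothetical) of the
 * pfn_valid()/virt_to_page() guards from page.h just above:
 * virt_addr_valid() checks that the pseudo-physical frame lies below
 * end_pfn before the address is turned into a struct page pointer.
 */
static inline struct page *uvp_kva_to_page_example(void *va)
{
        if (!virt_addr_valid(va))       /* rejects addresses whose pfn >= end_pfn */
                return NULL;
        return virt_to_page(va);
}
/* (end of inserted example)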
*/ extern int iommu_sac_force; #define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force) #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ __u32 LEN_NAME; #define pci_unmap_addr(PTR, ADDR_NAME) \ ((PTR)->ADDR_NAME) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ (((PTR)->ADDR_NAME) = (VAL)) #define pci_unmap_len(PTR, LEN_NAME) \ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) #elif defined(CONFIG_SWIOTLB) #define pci_dac_dma_supported(pci_dev, mask) 1 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ __u32 LEN_NAME; #define pci_unmap_addr(PTR, ADDR_NAME) \ ((PTR)->ADDR_NAME) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ (((PTR)->ADDR_NAME) = (VAL)) #define pci_unmap_len(PTR, LEN_NAME) \ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) #else /* No IOMMU */ #define pci_dac_dma_supported(pci_dev, mask) 1 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif #include static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return virt_to_page(__va(dma_addr)); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #endif /* __KERNEL__ */ /* generic pci stuff */ #ifdef CONFIG_PCI #include #endif #endif /* __x8664_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/pgalloc.h000066400000000000000000000127471314037446600272120ustar00rootroot00000000000000#ifndef _X86_64_PGALLOC_H #define _X86_64_PGALLOC_H #include #include #include #include #include /* for phys_to_virt and page_to_pseudophys */ #include void make_page_readonly(void *va, unsigned int feature); void make_page_writable(void *va, unsigned int feature); void make_pages_readonly(void *va, unsigned int nr, unsigned int feature); void make_pages_writable(void *va, unsigned int nr, unsigned int feature); #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { if (unlikely((mm)->context.pinned)) { 
BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT), pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0)); set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); } else { *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)); } } static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { if (unlikely((mm)->context.pinned)) { BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)pmd, pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0)); set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); } else { *(pud) = __pud(_PAGE_TABLE | __pa(pmd)); } } /* * We need to use the batch mode here, but pgd_pupulate() won't be * be called frequently. */ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) { if (unlikely((mm)->context.pinned)) { BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)pud, pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0)); set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud))); } else { *(pgd) = __pgd(_PAGE_TABLE | __pa(pud)); *(__user_pgd(pgd)) = *(pgd); } } extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr); extern void pte_free(struct page *pte); static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { struct page *pg; pg = pte_alloc_one(mm, addr); return pg ? page_address(pg) : NULL; } static inline void pmd_free(pmd_t *pmd) { BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); pte_free(virt_to_page(pmd)); } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { struct page *pg; pg = pte_alloc_one(mm, addr); return pg ? page_address(pg) : NULL; } static inline void pud_free(pud_t *pud) { BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); pte_free(virt_to_page(pud)); } static inline void pgd_list_add(pgd_t *pgd) { struct page *page = virt_to_page(pgd); spin_lock(&pgd_lock); page->index = (pgoff_t)pgd_list; if (pgd_list) pgd_list->private = (unsigned long)&page->index; pgd_list = page; page->private = (unsigned long)&pgd_list; spin_unlock(&pgd_lock); } static inline void pgd_list_del(pgd_t *pgd) { struct page *next, **pprev, *page = virt_to_page(pgd); spin_lock(&pgd_lock); next = (struct page *)page->index; pprev = (struct page **)page->private; *pprev = next; if (next) next->private = (unsigned long)pprev; spin_unlock(&pgd_lock); } static inline pgd_t *pgd_alloc(struct mm_struct *mm) { /* * We allocate two contiguous pages for kernel and user. */ unsigned boundary; pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1); if (!pgd) return NULL; pgd_list_add(pgd); /* * Copy kernel pointers in from init. * Could keep a freelist or slab cache of those because the kernel * part never changes. 
*/ boundary = pgd_index(__PAGE_OFFSET); memset(pgd, 0, boundary * sizeof(pgd_t)); memcpy(pgd + boundary, init_level4_pgt + boundary, (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */ /* * Set level3_user_pgt for vsyscall area */ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] = __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE); return pgd; } static inline void pgd_free(pgd_t *pgd) { pte_t *ptep = virt_to_ptep(pgd); if (!pte_write(*ptep)) { xen_pgd_unpin(__pa(pgd)); BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)pgd, pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0)); } ptep = virt_to_ptep(__user_pgd(pgd)); if (!pte_write(*ptep)) { xen_pgd_unpin(__pa(__user_pgd(pgd))); BUG_ON(HYPERVISOR_update_va_mapping( (unsigned long)__user_pgd(pgd), pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0)); } pgd_list_del(pgd); free_pages((unsigned long)pgd, 1); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); if (pte) make_page_readonly(pte, XENFEAT_writable_page_tables); return pte; } /* Should really implement gc for free page table pages. This could be done with a reference count in struct page. */ static inline void pte_free_kernel(pte_t *pte) { BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); make_page_writable(pte, XENFEAT_writable_page_tables); free_page((unsigned long)pte); } #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) #endif /* _X86_64_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/pgtable.h000066400000000000000000000467621314037446600272130ustar00rootroot00000000000000#ifndef _X86_64_PGTABLE_H #define _X86_64_PGTABLE_H /* * This file contains the functions and defines necessary to modify and use * the x86-64 page table tree. */ #include #include #include #include #include #include #ifdef CONFIG_XEN #include extern pud_t level3_user_pgt[512]; extern void xen_init_pt(void); #define virt_to_ptep(__va) \ ({ \ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \ pte_offset_kernel(__pmd, (unsigned long)(__va)); \ }) #define arbitrary_virt_to_machine(__va) \ ({ \ maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\ m | ((unsigned long)(__va) & (PAGE_SIZE-1)); \ }) #endif extern pud_t level3_kernel_pgt[512]; extern pud_t level3_physmem_pgt[512]; extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pgd_t init_level4_pgt[]; extern pgd_t boot_level4_pgt[]; extern unsigned long __supported_pte_mask; #define swapper_pg_dir init_level4_pgt extern int nonx_setup(char *str); extern void paging_init(void); extern void clear_kernel_mapping(unsigned long addr, unsigned long size); extern unsigned long pgkern_mask; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. 
*/ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 39 #define PTRS_PER_PGD 512 /* * 3rd level page */ #define PUD_SHIFT 30 #define PTRS_PER_PUD 512 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \ &(e), __pte_val(e), pte_pfn(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \ &(e), __pmd_val(e), pmd_pfn(e)) #define pud_ERROR(e) \ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pgd_none(x) (!__pgd_val(x)) #define pud_none(x) (!__pud_val(x)) static inline void set_pte(pte_t *dst, pte_t val) { *dst = val; } #define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval)) #define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval)) #define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval)) static inline void pud_clear (pud_t * pud) { set_pud(pud, __pud(0)); } #define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD) static inline void pgd_clear (pgd_t * pgd) { set_pgd(pgd, __pgd(0)); set_pgd(__user_pgd(pgd), __pgd(0)); } #define pud_page(pud) \ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) #define pte_same(a, b) ((a).pte == (b).pte) #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) #define FIRST_USER_ADDRESS 0 #ifndef __ASSEMBLY__ #define MAXMEM 0x3fffffffffffUL #define VMALLOC_START 0xffffc20000000000UL #define VMALLOC_END 0xffffe1ffffffffffUL #define MODULES_VADDR 0xffffffff88000000UL #define MODULES_END 0xfffffffffff00000UL #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 2MB page */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ #define _PAGE_PROTNONE 0x080 /* If not present */ #define _PAGE_NX (1UL<<_PAGE_BIT_NX) #if CONFIG_XEN_COMPAT <= 0x030002 extern unsigned int __kernel_page_user; #else #define __kernel_page_user 0 #endif #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user) #define _PAGE_CHG_MASK 
(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY PAGE_COPY_NOEXEC #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define __PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user) #define __PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user) #define __PAGE_KERNEL_NOCACHE \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user) #define __PAGE_KERNEL_RO \ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user) #define __PAGE_KERNEL_VSYSCALL \ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define __PAGE_KERNEL_VSYSCALL_NOCACHE \ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD) #define __PAGE_KERNEL_LARGE \ (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC \ (__PAGE_KERNEL_EXEC | _PAGE_PSE) /* * We don't support GLOBAL page in xenolinux64 */ #define MAKE_GLOBAL(x) __pgprot((x)) #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL) #define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) #define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) /* xwr */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC static inline unsigned long pgd_bad(pgd_t pgd) { unsigned long val = __pgd_val(pgd); val &= ~PTE_MASK; val &= ~(_PAGE_USER | _PAGE_DIRTY); return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); } static inline unsigned long pud_bad(pud_t pud) { unsigned long val = __pud_val(pud); val &= ~PTE_MASK; val &= ~(_PAGE_USER | _PAGE_DIRTY); return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); } #define set_pte_at(_mm,addr,ptep,pteval) do { \ if (((_mm) != current->mm && (_mm) != &init_mm) || \ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \ set_pte((ptep), (pteval)); \ } while (0) #define pte_none(x) (!(x).pte) #define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) #define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT) #define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte))) #define pte_pfn(_pte) ((_pte).pte & _PAGE_PRESENT ? 
\ mfn_to_local_pfn(__pte_mfn(_pte)) : __pte_mfn(_pte)) #define pte_page(x) pfn_to_page(pte_pfn(x)) static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { unsigned long pte = page_nr << PAGE_SHIFT; pte |= pgprot_val(pgprot); pte &= __supported_pte_mask; return __pte(pte); } static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (!pte_none(pte)) { if ((mm != &init_mm) || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) pte = __pte_ma(xchg(&ptep->pte, 0)); } return pte; } static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) { if (full) { pte_t pte = *ptep; if (mm->context.pinned) xen_l1_entry_update(ptep, __pte(0)); else *ptep = __pte(0); return pte; } return ptep_get_and_clear(mm, addr, ptep); } #define ptep_clear_flush(vma, addr, ptep) \ ({ \ pte_t *__ptep = (ptep); \ pte_t __res = *__ptep; \ if (!pte_none(__res) && \ ((vma)->vm_mm != current->mm || \ HYPERVISOR_update_va_mapping(addr, __pte(0), \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI))) { \ __ptep->pte = 0; \ flush_tlb_page(vma, addr); \ } \ __res; \ }) /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; } static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; } static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; } static inline int pte_huge(pte_t pte) { return (__pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; } static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; } static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; } static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; } static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; } static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= __LARGE_PTE; return pte; } #define ptep_test_and_clear_dirty(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_dirty(__pte); \ if (__ret) \ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \ __ret; \ }) #define ptep_test_and_clear_young(vma, addr, ptep) \ ({ \ pte_t __pte = *(ptep); \ int __ret = pte_young(__pte); \ if (__ret) \ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \ __ret; \ }) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (pte_write(pte)) set_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } /* * Macro 
to mark a page protection value as "uncacheable". */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) static inline int pmd_large(pmd_t pte) { return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; } /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ /* * Level 4 access. * Never use these in the common code. */ #define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) #define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT) #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE) /* PUD - Level3 access */ /* to find an entry in a page-table-directory. */ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) /* This accesses the reference page table of the boot cpu. Other CPUs get synced lazily via the page fault handler. */ static inline pud_t *pud_offset_k(pgd_t *pgd, unsigned long address) { return pud_offset(pgd_offset_k(address), address); } #define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT) /* PMD - Level 2 access */ #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \ pmd_index(address)) #define pmd_none(x) (!__pmd_val(x)) #if CONFIG_XEN_COMPAT <= 0x030002 /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t. can temporarily clear it. */ #define pmd_present(x) (__pmd_val(x)) #else #define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT) #endif #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT))) #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT /* PTE - Level 1 access. */ /* page, protection -> pte */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE) /* physical address -> PTE */ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { unsigned long pteval; pteval = physpage | pgprot_val(pgprot); return __pte(pteval); } /* Change flags of a PTE */ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { /* * Since this might change the present bit (which controls whether * a pte_t object has undergone p2m translation), we must use * pte_val() on the input pte and __pte() for the return value. */ unsigned long pteval = pte_val(pte); pteval &= _PAGE_CHG_MASK; pteval |= pgprot_val(newprot); pteval &= __supported_pte_mask; return __pte(pteval); } #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \ pte_index(address)) /* x86-64 always has all page tables mapped. 
*/ #define pte_offset_map(dir,address) pte_offset_kernel(dir,address) #define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address) #define pte_unmap(pte) /* NOP */ #define pte_unmap_nested(pte) /* NOP */ #define update_mmu_cache(vma,address,pte) do { } while (0) /* * Rules for using ptep_establish: the pte MUST be a user pte, and * must be a present->present transition. */ #define __HAVE_ARCH_PTEP_ESTABLISH #define ptep_establish(vma, address, ptep, pteval) \ do { \ if ( likely((vma)->vm_mm == current->mm) ) { \ BUG_ON(HYPERVISOR_update_va_mapping(address, \ pteval, \ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \ UVMF_INVLPG|UVMF_MULTI)); \ } else { \ xen_l1_entry_update(ptep, pteval); \ flush_tlb_page(vma, address); \ } \ } while (0) /* We only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. */ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ do { \ if (dirty) \ ptep_establish(vma, address, ptep, entry); \ } while (0) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { __pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) extern spinlock_t pgd_lock; extern struct page *pgd_list; void vmalloc_sync_all(void); #endif /* !__ASSEMBLY__ */ extern int kern_addr_valid(unsigned long addr); #define DOMID_LOCAL (0xFFFFU) struct vm_area_struct; int direct_remap_pfn_range(struct vm_area_struct *vma, unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int direct_kernel_remap_pfn_range(unsigned long address, unsigned long mfn, unsigned long size, pgprot_t prot, domid_t domid); int create_lookup_pte_addr(struct mm_struct *mm, unsigned long address, uint64_t *ptep); int touch_pte_range(struct mm_struct *mm, unsigned long address, unsigned long size); int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot); #define arch_change_pte_range(mm, pmd, addr, end, newprot) \ xen_change_pte_range(mm, pmd, addr, end, newprot) #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define HAVE_ARCH_UNMAPPED_AREA #define pgtable_cache_init() do { } while (0) #define check_pgt_cache() do { } while (0) #define PAGE_AGP PAGE_KERNEL_NOCACHE #define HAVE_PAGE_AGP 1 /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? 
((o) | (~__VIRTUAL_MASK)) : (o)) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME #include #endif /* _X86_64_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/processor.h000066400000000000000000000327331314037446600276050ustar00rootroot00000000000000/* * include/asm-x86_64/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_X86_64_PROCESSOR_H #define __ASM_X86_64_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #define VM_MASK 0x00020000 #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ #define VIP_MASK 0x00100000 /* virtual interrupt pending */ #define ID_MASK 0x00200000 #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ __u32 x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB */ int x86_clflush_size; int x86_cache_alignment; int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ __u8 x86_virt_bits, x86_phys_bits; __u8 x86_max_cores; /* cpuid returned max cores value */ __u32 x86_power; __u32 extended_cpuid_level; /* Max extended CPUID function supported */ unsigned long loops_per_jiffy; __u8 apicid; __u8 booted_cores; /* number of cores as seen by OS */ } ____cacheline_aligned; #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NUM 8 #define X86_VENDOR_UNKNOWN 0xff #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 
0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. */ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { mmu_cr4_features |= mask; __asm__("movq %%cr4,%%rax\n\t" "orq %0,%%rax\n\t" "movq %%rax,%%cr4\n" : : "irg" (mask) :"ax"); } static inline void clear_in_cr4 (unsigned long mask) { mmu_cr4_features &= ~mask; __asm__("movq %%cr4,%%rax\n\t" "andq %0,%%rax\n\t" "movq %%rax,%%cr4\n" : : "irg" (~mask) :"ax"); } /* * Bus types */ #define MCA_bus 0 #define MCA_bus__is_a_macro /* * User space process size. 47bits minus one guard page. */ #define TASK_SIZE64 (0x800000000000UL - 4096) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. * * /proc/pid/unmap_base is only supported for 32bit processes without * 3GB personality for now. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64) #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64) /* FIXME: Used in initializer, do we need to check for 64bit here? */ #define __TASK_UNMAPPED_BASE (PAGE_ALIGN(0xffffe000 / 3)) #define TASK_UNMAPPED_32 ((current->personality & ADDR_LIMIT_3GB) ? \ PAGE_ALIGN(0xc0000000 / 3) : PAGE_ALIGN(current->map_base)) #define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE64/3) #define TASK_UNMAPPED_BASE \ (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64) /* * Size of io_bitmap. 
*/ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #ifndef CONFIG_X86_NO_TSS #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #endif #define INVALID_IO_BITMAP_OFFSET 0x8000 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; u64 rip; u64 rdp; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */ u32 padding[24]; } __attribute__ ((aligned (16))); union i387_union { struct i387_fxsave_struct fxsave; }; #ifndef CONFIG_X86_NO_TSS struct tss_struct { u32 reserved1; u64 rsp0; u64 rsp1; u64 rsp2; u64 reserved2; u64 ist[7]; u32 reserved3; u32 reserved4; u16 reserved5; u16 io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. Thus we have: * * 128 bytes, the bitmap itself, for ports 0..0x3ff * 8 bytes, for an extra "long" of ~0UL */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; } __attribute__((packed)) ____cacheline_aligned; DECLARE_PER_CPU(struct tss_struct,init_tss); #endif extern struct cpuinfo_x86 boot_cpu_data; #ifdef CONFIG_X86_VSMP #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) #else #define ARCH_MIN_TASKALIGN 16 #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif struct thread_struct { unsigned long rsp0; unsigned long rsp; unsigned long userrsp; /* Copy from PDA */ unsigned long fs; unsigned long gs; unsigned short es, ds, fsindex, gsindex; /* Hardware debugging registers */ unsigned long debugreg0; unsigned long debugreg1; unsigned long debugreg2; unsigned long debugreg3; unsigned long debugreg6; unsigned long debugreg7; /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387 __attribute__((aligned(16))); /* IO permissions. the bitmap could be moved into the GDT, that would make switch faster for a limited number of ioperm using tasks. -AK */ int ioperm; unsigned long *io_bitmap_ptr; unsigned io_bitmap_max; /* cached TLS descriptors. */ u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; unsigned int iopl; } __attribute__((aligned(16))); #define INIT_THREAD { \ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #ifndef CONFIG_X86_NO_TSS #define INIT_TSS { \ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #endif #define INIT_MMAP \ { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } #define start_thread(regs,new_rip,new_rsp) do { \ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ load_gs_index(0); \ (regs)->rip = (new_rip); \ (regs)->rsp = (new_rsp); \ write_pda(oldrsp, (new_rsp)); \ (regs)->cs = __USER_CS; \ (regs)->ss = __USER_DS; \ (regs)->eflags = 0x200; \ set_fs(USER_DS); \ } while(0) #define get_debugreg(var, register) \ var = HYPERVISOR_get_debugreg(register) #define set_debugreg(value, register) do { \ if (HYPERVISOR_set_debugreg(register, value)) \ BUG(); \ } while (0) struct task_struct; struct mm_struct; /* Free all resources held by a thread. 
*/ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); /* * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) extern unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip) #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ #define MICROCODE_IOCFREE _IO('6',0) #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 /* Opteron nops */ #define K8_NOP1 ".byte 0x90\n" #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 #define ASM_NOP_MAX 8 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } #define cpu_has_fpu 1 #define ARCH_HAS_PREFETCH static inline void prefetch(void *x) { asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #define ARCH_HAS_PREFETCHW 1 static inline void prefetchw(void *x) { alternative_input("prefetcht0 (%1)", "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define ARCH_HAS_SPINLOCK_PREFETCH 1 #define spin_lock_prefetch(x) prefetchw(x) #define cpu_relax() rep_nop() /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) static inline void serialize_cpu(void) { __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } #define stack_current() \ ({ \ struct thread_info *ti; \ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->task; \ }) #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; /* Boot loader type from the setup header */ extern int bootloader_type; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 #endif /* __ASM_X86_64_PROCESSOR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/ptrace.h000066400000000000000000000060561314037446600270430ustar00rootroot00000000000000#ifndef _X86_64_PTRACE_H #define _X86_64_PTRACE_H #if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) #define R15 0 #define R14 8 #define R13 16 #define R12 24 #define RBP 32 #define RBX 40 /* arguments: interrupts/non tracing syscalls only save upto here*/ #define R11 48 #define R10 56 #define R9 64 #define R8 72 #define RAX 80 #define RCX 88 #define RDX 96 #define RSI 104 #define RDI 112 #define ORIG_RAX 120 /* = ERROR */ /* end of arguments */ /* cpu exception frame or undefined in case of fast syscall. 
*/ #define RIP 128 #define CS 136 #define EFLAGS 144 #define RSP 152 #define SS 160 #define ARGOFFSET R11 #endif /* __ASSEMBLY__ */ /* top of stack page */ #define FRAME_SIZE 168 #define PTRACE_OLDSETOPTIONS 21 #ifndef __ASSEMBLY__ struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; #endif /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 /* only useful for access 32bit programs */ #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #define user_mode(regs) (!!((regs)->cs & 3)) #define user_mode_vm(regs) user_mode(regs) #define instruction_pointer(regs) ((regs)->rip) extern unsigned long profile_pc(struct pt_regs *regs); #include void signal_fault(struct pt_regs *regs, void __user *frame, char *where); struct task_struct; extern unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); enum { EF_CF = 0x00000001, EF_PF = 0x00000004, EF_AF = 0x00000010, EF_ZF = 0x00000040, EF_SF = 0x00000080, EF_TF = 0x00000100, EF_IE = 0x00000200, EF_DF = 0x00000400, EF_OF = 0x00000800, EF_IOPL = 0x00003000, EF_IOPL_RING0 = 0x00000000, EF_IOPL_RING1 = 0x00001000, EF_IOPL_RING2 = 0x00002000, EF_NT = 0x00004000, /* nested task */ EF_RF = 0x00010000, /* resume */ EF_VM = 0x00020000, /* virtual mode */ EF_AC = 0x00040000, /* alignment */ EF_VIF = 0x00080000, /* virtual interrupt */ EF_VIP = 0x00100000, /* virtual interrupt pending */ EF_ID = 0x00200000, /* id */ }; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/smp.h000066400000000000000000000065551314037446600263700ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include extern int disable_apic; #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #include #endif #endif #ifdef CONFIG_SMP #ifndef ASSEMBLY #include struct pt_regs; extern cpumask_t cpu_present_mask; extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_initialized; /* * Private routines/data */ extern void smp_alloc_memory(void); extern volatile unsigned long smp_invalidate_needed; extern int pic_mode; extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); extern int smp_num_siblings; extern void smp_send_reschedule(int cpu); void smp_stop_cpu(void); extern int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, int retry, int wait); extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; extern int phys_proc_id[NR_CPUS]; extern int cpu_core_id[NR_CPUS]; #define SMP_TRAMPOLINE_BASE 0x6000 /* * On 
x86 all CPUs are mapped 1:1 to the APIC space. * This simplifies scheduling and IPI sending and * compresses data structures. */ static inline int num_booting_cpus(void) { return cpus_weight(cpu_possible_map); } #define raw_smp_processor_id() read_pda(cpunumber) #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); } #endif extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); extern unsigned num_processors; extern unsigned disabled_cpus; #endif /* !ASSEMBLY */ #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #ifndef ASSEMBLY /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. */ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ extern u8 x86_cpu_to_log_apicid[NR_CPUS]; extern u8 bios_cpu_apicid[]; #ifdef CONFIG_X86_LOCAL_APIC static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return cpus_addr(cpumask)[0]; } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) return (int)bios_cpu_apicid[mps_cpu]; else return BAD_APICID; } #endif #endif /* !ASSEMBLY */ #ifndef CONFIG_SMP #define stack_smp_processor_id() 0 #define safe_smp_processor_id() 0 #define cpu_logical_map(x) (x) #else #include #define stack_smp_processor_id() \ ({ \ struct thread_info *ti; \ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->cpu; \ }) #endif #ifndef __ASSEMBLY__ #if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN) static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif #endif #ifdef CONFIG_SMP #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #else #define cpu_physical_id(cpu) boot_cpu_id #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/synch_bitops.h000066400000000000000000000000611314037446600302570ustar00rootroot00000000000000 #include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/system.h000066400000000000000000000305031314037446600271030ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #include #include #ifdef __KERNEL__ #ifdef CONFIG_IA32_EMULATION #define AT_VECTOR_SIZE_ARCH 2 #else #define AT_VECTOR_SIZE_ARCH 1 #endif #ifdef CONFIG_SMP #define LOCK_PREFIX "lock ; " #else #define LOCK_PREFIX "" #endif #define __STR(x) #x #define STR(x) __STR(x) #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" /* frame pointer must be last for get_wchan */ #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t" #define __EXTRA_CLOBBER \ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" #define switch_to(prev,next,last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ "call __switch_to\n\t" \ ".globl thread_return\n" \ "thread_return:\n\t" \ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq 
%%rax,%%rdi\n\t" \ "jc ret_from_fork\n\t" \ RESTORE_CONTEXT \ : "=a" (last) \ : [next] "S" (next), [prev] "D" (prev), \ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)),\ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ : "memory", "cc" __EXTRA_CLOBBER) extern void load_gs_index(unsigned); /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "movl %k0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "movl %1,%%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 8\n\t" \ ".quad 1b,3b\n" \ ".previous" \ : :"r" (value), "r" (0)) #define set_debug(value,register) \ __asm__("movq %0,%%db" #register \ : /* no output */ \ :"r" ((unsigned long) value)) #ifdef __KERNEL__ struct alt_instr { __u8 *instr; /* original instruction */ __u8 *replacement; __u8 cpuid; /* cpuid bit set for replacement */ __u8 instrlen; /* length of original instruction */ __u8 replacementlen; /* length of new instruction, <= instrlen */ __u8 pad[5]; }; #endif /* * Alternative instructions for different CPU types or capabilities. * * This allows to use optimized instructions even on generic binary * kernels. * * length of oldinstr must be longer or equal the length of newinstr * It can be padded with nops as needed. * * For non barrier like inlines please define new variants * without volatile and memory clobber. */ #define alternative(oldinstr, newinstr, feature) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. * * Peculiarities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... "r") * If you use variable sized constraints like "m" or "g" in the * replacement make sure to pad to the worst case length. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature), ##input) /* Like alternative_input, but with a single output argument */ #define alternative_io(oldinstr, newinstr, feature, output, input...) 
\ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c[feat]\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" : output : [feat] "i" (feature), ##input) /* * Clear and set 'TS' bit respectively */ #define clts() (HYPERVISOR_fpu_taskswitch(0)) static inline unsigned long __raw_local_save_flags(void) { unsigned long flags; __asm__ __volatile__( "# __raw_save_flags\n\t" "pushfq ; popq %q0" : "=g" (flags) : /* no input */ : "memory" ); return flags; } #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) static inline unsigned long read_cr0(void) { unsigned long cr0; asm volatile("movq %%cr0,%0" : "=r" (cr0)); return cr0; } static inline void write_cr0(unsigned long val) { asm volatile("movq %0,%%cr0" :: "r" (val)); } #define read_cr3() ({ \ unsigned long __dummy; \ asm("movq %%cr3,%0" : "=r" (__dummy)); \ machine_to_phys(__dummy); \ }) static inline unsigned long read_cr4(void) { unsigned long cr4; asm("movq %%cr4,%0" : "=r" (cr4)); return cr4; } static inline void write_cr4(unsigned long val) { asm volatile("movq %0,%%cr4" :: "r" (val)); } #define stts() (HYPERVISOR_fpu_taskswitch(1)) #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory"); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible. */ static inline void sched_cacheflush(void) { wbinvd(); } #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) #define __xg(x) ((volatile long *)(x)) static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) { *ptr = val; } #define _set_64bit set_64bit /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %k0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 8: __asm__ __volatile__("xchgq %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. 
*/ #define __HAVE_ARCH_CMPXCHG 1 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_read_barrier_depends() do {} while(0) #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do {} while(0) #endif /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. */ #define mb() asm volatile("mfence":::"memory") #define rmb() asm volatile("lfence":::"memory") #ifdef CONFIG_UNORDERED_IO #define wmb() asm volatile("sfence" ::: "memory") #else #define wmb() asm volatile("" ::: "memory") #endif #define read_barrier_depends() do {} while(0) #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) /* * The use of 'barrier' in the following reflects their use as local-lock * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following * critical operations are executed. All critical operations must complete * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also * includes these barriers, for example. 
*/ #define __cli() \ do { \ preempt_disable(); \ current_vcpu_info()->evtchn_upcall_mask = 1; \ preempt_enable_no_resched(); \ barrier(); \ } while (0) #define __sti() \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ _vcpu->evtchn_upcall_mask = 0; \ barrier(); /* unmask then check (avoid races) */ \ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \ force_evtchn_callback(); \ preempt_enable(); \ } while (0) #define __save_flags(x) \ do { \ preempt_disable(); \ (x) = current_vcpu_info()->evtchn_upcall_mask; \ preempt_enable(); \ } while (0) #define __restore_flags(x) \ do { \ vcpu_info_t *_vcpu; \ barrier(); \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \ barrier(); /* unmask then check (avoid races) */ \ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \ force_evtchn_callback(); \ preempt_enable(); \ } else \ preempt_enable_no_resched(); \ } while (0) #define __save_and_cli(x) \ do { \ vcpu_info_t *_vcpu; \ preempt_disable(); \ _vcpu = current_vcpu_info(); \ (x) = _vcpu->evtchn_upcall_mask; \ _vcpu->evtchn_upcall_mask = 1; \ preempt_enable_no_resched(); \ barrier(); \ } while (0) #define local_irq_save(x) __save_and_cli(x) #define local_irq_restore(x) __restore_flags(x) #define local_save_flags(x) __save_flags(x) #define local_irq_disable() __cli() #define local_irq_enable() __sti() /* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */ #define irqs_disabled() \ ({ int ___x; \ preempt_disable(); \ ___x = (current_vcpu_info()->evtchn_upcall_mask != 0); \ preempt_enable_no_resched(); \ ___x; }) void safe_halt(void); void halt(void); void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/timer.h000066400000000000000000000035461314037446600267060ustar00rootroot00000000000000#ifndef _ASMi386_TIMER_H #define _ASMi386_TIMER_H #include /** * struct timer_opts - used to define a timer source * * @name: name of the timer. * @init: Probes and initializes the timer. Takes clock= override * string as an argument. Returns 0 on success, anything else * on failure. * @mark_offset: called by the timer interrupt. * @get_offset: called by gettimeofday(). Returns the number of microseconds * since the last timer interrupt. * @monotonic_clock: returns the number of nanoseconds since the init of the * timer. * @delay: delays this many clock cycles.
*/ struct timer_opts { char* name; void (*mark_offset)(void); unsigned long (*get_offset)(void); unsigned long long (*monotonic_clock)(void); void (*delay)(unsigned long); unsigned long (*read_timer)(void); int (*suspend)(pm_message_t state); int (*resume)(void); }; struct init_timer_opts { int (*init)(char *override); struct timer_opts *opts; }; #define TICK_SIZE (tick_nsec / 1000) extern struct timer_opts* __init select_timer(void); extern void clock_fallback(void); void setup_pit_timer(void); /* Modifiers for buggy PIT handling */ extern int pit_latch_buggy; extern struct timer_opts *cur_timer; extern int timer_ack; /* list of externed timers */ extern struct timer_opts timer_none; extern struct timer_opts timer_pit; extern struct init_timer_opts timer_pit_init; extern struct init_timer_opts timer_tsc_init; #ifdef CONFIG_X86_CYCLONE_TIMER extern struct init_timer_opts timer_cyclone_init; #endif extern unsigned long calibrate_tsc(void); extern void init_cpu_khz(void); #ifdef CONFIG_HPET_TIMER extern struct init_timer_opts timer_hpet_init; extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr); #endif #ifdef CONFIG_X86_PM_TIMER extern struct init_timer_opts timer_pmtmr_init; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/tlbflush.h000066400000000000000000000053241314037446600274050ustar00rootroot00000000000000#ifndef _X8664_TLBFLUSH_H #define _X8664_TLBFLUSH_H #include #include #define __flush_tlb() xen_tlb_flush() /* * Global pages have to be flushed a bit differently. Not a real * performance problem because this does not happen often. */ #define __flush_tlb_global() xen_tlb_flush() extern unsigned long pgkern_mask; #define __flush_tlb_all() __flush_tlb_global() #define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr) /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * x86-64 can only flush individual pages or full VMs. For a range flush * we always do the full VM. Might be worth trying if for a small * range a few INVLPGs in a row are a win. 
*/ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() #define flush_tlb_all xen_tlb_flush_all #define flush_tlb_current_task() xen_tlb_flush_mask(¤t->mm->cpu_vm_mask) #define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask) #define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va) #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 /* Roughly an IPI every 20MB with 4k pages for freeing page table ranges. Cost is about 42k of memory for each CPU. */ #define ARCH_FREE_PTE_NR 5350 #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* x86_64 does not keep any page table caches in a software TLB. The CPUs do in their hardware TLBs, but they are handled by the normal TLB flushing algorithms. */ } #endif /* _X8664_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/vga.h000066400000000000000000000005741314037446600263410ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/xenoprof.h000066400000000000000000000000541314037446600274150ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/asm/xor.h000066400000000000000000000161531314037446600263740ustar00rootroot00000000000000/* * x86-64 changes / gcc fixes from Andi Kleen. * Copyright 2002 Andi Kleen, SuSE Labs. * * This hasn't been optimized for the hammer yet, but there are likely * no advantages to be gotten from x86-64 here anyways. */ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; /* Doesn't use gcc to save the XMM registers, because there is no easy way to tell it to do a clts before the register saving. 
*/ #define XMMS_SAVE do { \ preempt_disable(); \ if (!(current_thread_info()->status & TS_USEDFPU)) \ clts(); \ __asm__ __volatile__ ( \ "movups %%xmm0,(%1) ;\n\t" \ "movups %%xmm1,0x10(%1) ;\n\t" \ "movups %%xmm2,0x20(%1) ;\n\t" \ "movups %%xmm3,0x30(%1) ;\n\t" \ : "=&r" (cr0) \ : "r" (xmm_save) \ : "memory"); \ } while(0) #define XMMS_RESTORE do { \ asm volatile ( \ "sfence ;\n\t" \ "movups (%1),%%xmm0 ;\n\t" \ "movups 0x10(%1),%%xmm1 ;\n\t" \ "movups 0x20(%1),%%xmm2 ;\n\t" \ "movups 0x30(%1),%%xmm3 ;\n\t" \ : \ : "r" (cr0), "r" (xmm_save) \ : "memory"); \ if (!(current_thread_info()->status & TS_USEDFPU)) \ stts(); \ preempt_enable(); \ } while(0) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" #define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" #define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" #define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" #define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" #define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" #define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned int lines = bytes >> 8; unsigned long cr0; xmm_store_t xmm_save[4]; XMMS_SAVE; asm volatile ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ PF1(i) \ PF1(i+2) \ LD(i+2,2) \ LD(i+3,3) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " decl %[cnt] ; jnz 1b" : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " addq %[inc], %[p3] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ PF0(i+4) \ PF0(i+6) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) 
BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " addq %[inc], %[p3] ;\n" " addq %[inc], %[p4] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) : [inc] "r" (256UL) : "memory" ); XMMS_RESTORE; } static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ PF4(i) \ PF4(i+2) \ PF0(i+4) \ PF0(i+6) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ XO4(i+1,1) \ XO4(i+2,2) \ XO4(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " addq %[inc], %[p3] ;\n" " addq %[inc], %[p4] ;\n" " addq %[inc], %[p5] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static struct xor_block_template xor_block_sse = { .name = "generic_sse", .do_2 = xor_sse_2, .do_3 = xor_sse_3, .do_4 = xor_sse_4, .do_5 = xor_sse_5, }; #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_sse); \ } while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/irq_vectors.h000066400000000000000000000066101314037446600273410ustar00rootroot00000000000000/* * This file should contain #defines for all of the interrupt vector * numbers used by this architecture. * * In addition, there are some standard defines: * * FIRST_EXTERNAL_VECTOR: * The first free place for external interrupts * * SYSCALL_VECTOR: * The IRQ vector a syscall makes the user to kernel transition * under. * * TIMER_IRQ: * The IRQ number the timer interrupt comes in at. * * NR_IRQS: * The total number of interrupt vectors (including all the * architecture specific interrupts) needed. * */ #ifndef _ASM_IRQ_VECTORS_H #define _ASM_IRQ_VECTORS_H /* * IDT vectors usable for external interrupt sources start * at 0x20: */ #define FIRST_EXTERNAL_VECTOR 0x20 #define SYSCALL_VECTOR 0x80 /* * Vectors 0x20-0x2f are used for ISA interrupts. */ #if 0 /* * Special IRQ vectors used by the SMP architecture, 0xf0-0xff * * some of the following vectors are 'rare', they are merged * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. * TLB, reschedule and local APIC vectors are performance-critical. * * Vectors 0xf0-0xfa are free (reserved for future Linux use). */ #define INVALIDATE_TLB_VECTOR 0xfd #define RESCHEDULE_VECTOR 0xfc #define CALL_FUNCTION_VECTOR 0xfb #define THERMAL_APIC_VECTOR 0xf0 /* * Local APIC timer IRQ vector is on a different priority level, * to work around the 'lost local interrupt if more than 2 IRQ * sources per level' errata. 
*/ #define LOCAL_TIMER_VECTOR 0xef #endif #define SPURIOUS_APIC_VECTOR 0xff #define ERROR_APIC_VECTOR 0xfe /* * First APIC vector available to drivers: (vectors 0x30-0xee) * we start at 0x31 to spread out vectors evenly between priority * levels. (0x80 is the syscall vector) */ #define FIRST_DEVICE_VECTOR 0x31 #define FIRST_SYSTEM_VECTOR 0xef /* * 16 8259A IRQ's, 208 potential APIC interrupt sources. * Right now the APIC is mostly only used for SMP. * 256 vectors is an architectural limit. (we can have * more than 256 devices theoretically, but they will * have to use shared interrupts) * Since vectors 0x00-0x1f are used/reserved for the CPU, * the usable vector space is 0x20-0xff (224 vectors) */ #define RESCHEDULE_VECTOR 0 #define CALL_FUNCTION_VECTOR 1 #define NR_IPIS 2 /* * The maximum number of vectors supported by i386 processors * is limited to 256. For processors other than i386, NR_VECTORS * should be changed accordingly. */ #define NR_VECTORS 256 #define FPU_IRQ 13 #define FIRST_VM86_IRQ 3 #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) /* * The flat IRQ space is divided into two regions: * 1. A one-to-one mapping of real physical IRQs. This space is only used * if we have physical device-access privilege. This region is at the * start of the IRQ space so that existing device drivers do not need * to be modified to translate physical IRQ numbers into our IRQ space. * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These * are bound using the provided bind/unbind functions. */ #define PIRQ_BASE 0 #define NR_PIRQS 256 #define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS) #define NR_DYNIRQS 256 #define NR_IRQS (NR_PIRQS + NR_DYNIRQS) #define NR_IRQ_VECTORS NR_IRQS #define pirq_to_irq(_x) ((_x) + PIRQ_BASE) #define irq_to_pirq(_x) ((_x) - PIRQ_BASE) #define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE) #define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE) #endif /* _ASM_IRQ_VECTORS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/mach_time.h000066400000000000000000000077731314037446600267420ustar00rootroot00000000000000/* * include/asm-i386/mach-default/mach_time.h * * Machine specific set RTC function for generic. * Split out from time.c by Osamu Tomita */ #ifndef _MACH_TIME_H #define _MACH_TIME_H #include /* for check timing call set_rtc_mmss() 500ms */ /* used in arch/i386/time.c::do_timer_interrupt() */ #define USEC_AFTER 500000 #define USEC_BEFORE 500000 /* * In order to set the CMOS clock precisely, set_rtc_mmss has to be * called 500 ms after the second nowtime has started, because when * nowtime is written into the registers of the CMOS clock, it will * jump to the next second precisely 500 ms later. Check the Motorola * MC146818A or Dallas DS12887 data sheet for details. * * BUG: This routine does not handle hour overflow properly; it just * sets the minutes. Usually you'll only notice that after reboot! 
*/ static inline int mach_set_rtc_mmss(unsigned long nowtime) { int retval = 0; int real_seconds, real_minutes, cmos_minutes; unsigned char save_control, save_freq_select; save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); cmos_minutes = CMOS_READ(RTC_MINUTES); if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) BCD_TO_BIN(cmos_minutes); /* * since we're only adjusting minutes and seconds, * don't interfere with hour overflow. This avoids * messing with unknown time zones but requires your * RTC not to be off by more than 15 minutes */ real_seconds = nowtime % 60; real_minutes = nowtime / 60; if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; if (abs(real_minutes - cmos_minutes) < 30) { if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BIN_TO_BCD(real_seconds); BIN_TO_BCD(real_minutes); } CMOS_WRITE(real_seconds,RTC_SECONDS); CMOS_WRITE(real_minutes,RTC_MINUTES); } else { printk(KERN_WARNING "set_rtc_mmss: can't update from %d to %d\n", cmos_minutes, real_minutes); retval = -1; } /* The following flags have to be released exactly in this order, * otherwise the DS12887 (popular MC146818A clone with integrated * battery and quartz) will not reset the oscillator and will not * update precisely 500 ms later. You won't find this mentioned in * the Dallas Semiconductor data sheets, but who believes data * sheets anyway ... -- Markus Kuhn */ CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); return retval; } static inline unsigned long mach_get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; int i; /* The Linux interpretation of the CMOS clock register contents: * When the Update-In-Progress (UIP) flag goes from 1 to 0, the * RTC registers show the second which has precisely just started. * Let's hope other operating systems interpret the RTC the same way. */ /* read RTC exactly on falling edge of update flag */ for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) break; for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) break; do { /* Isn't this overkill ? UIP above should guarantee consistency */ sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); day = CMOS_READ(RTC_DAY_OF_MONTH); mon = CMOS_READ(RTC_MONTH); year = CMOS_READ(RTC_YEAR); } while (sec != CMOS_READ(RTC_SECONDS)); if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(sec); BCD_TO_BIN(min); BCD_TO_BIN(hour); BCD_TO_BIN(day); BCD_TO_BIN(mon); BCD_TO_BIN(year); } if ((year += 1900) < 1970) year += 100; return mktime(year, mon, day, hour, min, sec); } #endif /* !_MACH_TIME_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/mach_timer.h000066400000000000000000000026501314037446600271110ustar00rootroot00000000000000/* * include/asm-i386/mach-default/mach_timer.h * * Machine specific calibrate_tsc() for generic. * Split out from timer_tsc.c by Osamu Tomita */ /* ------ Calibrate the TSC ------- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). 
* Too much 64-bit arithmetic here to do this cleanly in C, and for * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) * output busy loop as low as possible. We avoid reading the CTC registers * directly because of the awkward 8-bit access mechanism of the 82C54 * device. */ #ifndef _MACH_TIMER_H #define _MACH_TIMER_H #define CALIBRATE_LATCH (5 * LATCH) static inline void mach_prepare_counter(void) { /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Now let's take care of CTC channel 2 * * Set the Gate high, program CTC channel 2 for mode 0, * (interrupt on terminal count mode), binary count, * load 5 * LATCH count, (LSB and MSB) to begin countdown. * * Some devices need a delay here. */ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ } static inline void mach_countup(unsigned long *count_p) { unsigned long count = 0; do { count++; } while ((inb_p(0x61) & 0x20) == 0); *count_p = count; } #endif /* !_MACH_TIMER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/setup_arch_post.h000066400000000000000000000030671314037446600302060ustar00rootroot00000000000000/** * machine_specific_* - Hooks for machine specific setup. * * Description: * This is included late in kernel/setup.c so that it can make * use of all of the static functions. **/ #include extern void hypervisor_callback(void); extern void failsafe_callback(void); extern void nmi(void); static void __init machine_specific_arch_setup(void) { int ret; static struct callback_register __initdata event = { .type = CALLBACKTYPE_event, .address = (unsigned long) hypervisor_callback, }; static struct callback_register __initdata failsafe = { .type = CALLBACKTYPE_failsafe, .address = (unsigned long)failsafe_callback, }; static struct callback_register __initdata syscall = { .type = CALLBACKTYPE_syscall, .address = (unsigned long)system_call, }; static struct callback_register __initdata nmi_cb = { .type = CALLBACKTYPE_nmi, .address = (unsigned long)nmi, }; ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event); if (ret == 0) ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe); if (ret == 0) ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret == -ENOSYS) ret = HYPERVISOR_set_callbacks( event.address, failsafe.address, syscall.address); #endif BUG_ON(ret); ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb); #if CONFIG_XEN_COMPAT <= 0x030002 if (ret == -ENOSYS) { static struct xennmi_callback __initdata cb = { .handler_address = (unsigned long)nmi }; HYPERVISOR_nmi_op(XENNMI_register_callback, &cb); } #endif } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach-xen/setup_arch_pre.h000066400000000000000000000002321314037446600277760ustar00rootroot00000000000000/* Hook to call BIOS initialisation function */ #define ARCH_SETUP machine_specific_arch_setup(); static void __init machine_specific_arch_setup(void); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mach_apic.h000066400000000000000000000016721314037446600252100ustar00rootroot00000000000000#ifndef __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H /* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 * * Generic APIC sub-arch defines. 
* * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ #include #define INT_DELIVERY_MODE (genapic->int_delivery_mode) #define INT_DEST_MODE (genapic->int_dest_mode) #define INT_DELIVERY_DEST (genapic->int_delivery_dest) #define TARGET_CPUS (genapic->target_cpus()) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) #define send_IPI_mask (genapic->send_IPI_mask) #define send_IPI_allbutself (genapic->send_IPI_allbutself) #define send_IPI_all (genapic->send_IPI_all) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) #define phys_pkg_id (genapic->phys_pkg_id) #endif /* __ASM_MACH_APIC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mc146818rtc.h000066400000000000000000000011771314037446600251100ustar00rootroot00000000000000/* * Machine dependent access functions for RTC registers. */ #ifndef _ASM_MC146818RTC_H #define _ASM_MC146818RTC_H #include #ifndef RTC_PORT #define RTC_PORT(x) (0x70 + (x)) #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ #endif /* * The yet supported machines all access the RTC index register via * an ISA port access but the way to access the date register differs ... */ #define CMOS_READ(addr) ({ \ outb_p((addr),RTC_PORT(0)); \ inb_p(RTC_PORT(1)); \ }) #define CMOS_WRITE(val, addr) ({ \ outb_p((addr),RTC_PORT(0)); \ outb_p((val),RTC_PORT(1)); \ }) #define RTC_IRQ 8 #endif /* _ASM_MC146818RTC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mce.h000066400000000000000000000050131314037446600240410ustar00rootroot00000000000000#ifndef _ASM_MCE_H #define _ASM_MCE_H 1 #include #include /* * Machine Check support for x86 */ #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ #define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */ #define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */ #define MCI_STATUS_VAL (1UL<<63) /* valid error */ #define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */ #define MCI_STATUS_UC (1UL<<61) /* uncorrected error */ #define MCI_STATUS_EN (1UL<<60) /* error enabled */ #define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */ #define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */ #define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */ /* Fields are zero when not available */ struct mce { __u64 status; __u64 misc; __u64 addr; __u64 mcgstatus; __u64 rip; __u64 tsc; /* cpu time stamp counter */ __u64 res1; /* for future extension */ __u64 res2; /* dito. */ __u8 cs; /* code segment */ __u8 bank; /* machine check bank */ __u8 cpu; /* cpu that raised the error */ __u8 finished; /* entry is valid */ __u32 pad; }; /* * This structure contains all data related to the MCE log. * Also carries a signature to make it easier to find from external debugging tools. * Each entry is only valid when its finished flag is set. 
*/ #define MCE_LOG_LEN 32 struct mce_log { char signature[12]; /* "MACHINECHECK" */ unsigned len; /* = MCE_LOG_LEN */ unsigned next; unsigned flags; unsigned pad0; struct mce entry[MCE_LOG_LEN]; }; #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ #define MCE_LOG_SIGNATURE "MACHINECHECK" #define MCE_GET_RECORD_LEN _IOR('M', 1, int) #define MCE_GET_LOG_LEN _IOR('M', 2, int) #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 #define MCE_THRESHOLD_BASE MCE_EXTENDED_BANK + 1 /* MCE_AMD */ #define MCE_THRESHOLD_DRAM_ECC MCE_THRESHOLD_BASE + 4 #ifdef __KERNEL__ #include void mce_log(struct mce *m); #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); #else static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } #endif #ifdef CONFIG_X86_MCE_AMD void mce_amd_feature_init(struct cpuinfo_x86 *c); #else static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } #endif extern atomic_t mce_entry; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mman.h000066400000000000000000000012211314037446600242220ustar00rootroot00000000000000#ifndef __X8664_MMAN_H__ #define __X8664_MMAN_H__ #include #define MAP_32BIT 0x40 /* only give out 32bit addresses */ #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mmsegment.h000066400000000000000000000001621314037446600252710ustar00rootroot00000000000000#ifndef _ASM_MMSEGMENT_H #define _ASM_MMSEGMENT_H 1 typedef struct { unsigned long seg; } mm_segment_t; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mmu.h000066400000000000000000000005451314037446600241000ustar00rootroot00000000000000#ifndef __x86_64_MMU_H #define __x86_64_MMU_H #include #include /* * The x86_64 doesn't have a mmu context, but * we put the segment information here. * * cpu_vm_mask is used to optimize ldt flushing. */ typedef struct { void *ldt; rwlock_t ldtlock; int size; struct semaphore sem; } mm_context_t; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mmu_context.h000066400000000000000000000034071314037446600256440ustar00rootroot00000000000000#ifndef __X86_64_MMU_CONTEXT_H #define __X86_64_MMU_CONTEXT_H #include #include #include #include #include #include #include /* * possibly do the LDT unload here? 
*/ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { #ifdef CONFIG_SMP if (read_pda(mmu_state) == TLBSTATE_OK) write_pda(mmu_state, TLBSTATE_LAZY); #endif } static inline void load_cr3(pgd_t *pgd) { asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory"); } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); #ifdef CONFIG_SMP write_pda(mmu_state, TLBSTATE_OK); write_pda(active_mm, next); #endif set_bit(cpu, &next->cpu_vm_mask); load_cr3(next->pgd); if (unlikely(next->context.ldt != prev->context.ldt)) load_LDT_nolock(&next->context, cpu); } #ifdef CONFIG_SMP else { write_pda(mmu_state, TLBSTATE_OK); if (read_pda(active_mm) != next) out_of_line_bug(); if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. */ load_cr3(next->pgd); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk,mm) do { \ load_gs_index(0); \ asm volatile("movl %0,%%fs"::"r"(0)); \ } while(0) #define activate_mm(prev, next) \ switch_mm((prev),(next),NULL) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mmx.h000066400000000000000000000004161314037446600241000ustar00rootroot00000000000000#ifndef _ASM_MMX_H #define _ASM_MMX_H /* * MMX 3Dnow! helper operations */ #include extern void *_mmx_memcpy(void *to, const void *from, size_t size); extern void mmx_clear_page(void *page); extern void mmx_copy_page(void *to, void *from); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mmzone.h000066400000000000000000000026751314037446600246150ustar00rootroot00000000000000/* K8 NUMA support */ /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */ /* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. 
*/ #ifndef _ASM_X86_64_MMZONE_H #define _ASM_X86_64_MMZONE_H 1 #include #ifdef CONFIG_NUMA #define VIRTUAL_BUG_ON(x) #include /* Should really switch to dynamic allocation at some point */ #define NODEMAPSIZE 0x4fff /* Simple perfect hash to map physical addresses to node numbers */ extern int memnode_shift; extern u8 memnodemap[NODEMAPSIZE]; extern struct pglist_data *node_data[]; static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) { unsigned nid; VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE); nid = memnodemap[addr >> memnode_shift]; VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); return nid; } #define NODE_DATA(nid) (node_data[nid]) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ NODE_DATA(nid)->node_spanned_pages) #ifdef CONFIG_DISCONTIGMEM #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) extern struct page *pfn_to_page(unsigned long pfn); extern unsigned long page_to_pfn(struct page *page); extern int pfn_valid(unsigned long pfn); #endif #define local_mapnr(kvaddr) \ ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/module.h000066400000000000000000000002631314037446600245640ustar00rootroot00000000000000#ifndef _ASM_X8664_MODULE_H #define _ASM_X8664_MODULE_H struct mod_arch_specific {}; #define Elf_Shdr Elf64_Shdr #define Elf_Sym Elf64_Sym #define Elf_Ehdr Elf64_Ehdr #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mpspec.h000066400000000000000000000152041314037446600245670ustar00rootroot00000000000000#ifndef __ASM_MPSPEC_H #define __ASM_MPSPEC_H /* * Structure definitions for SMP machines following the * Intel Multiprocessing Specification 1.1 and 1.4. */ /* * This tag identifies where the SMP configuration * information is. */ #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') /* * A maximum of 255 APICs with the current APIC ID architecture. */ #define MAX_APICS 255 struct intel_mp_floating { char mpf_signature[4]; /* "_MP_" */ unsigned int mpf_physptr; /* Configuration table address */ unsigned char mpf_length; /* Our length (paragraphs) */ unsigned char mpf_specification;/* Specification version */ unsigned char mpf_checksum; /* Checksum (makes sum 0) */ unsigned char mpf_feature1; /* Standard or configuration ? 
*/ unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ unsigned char mpf_feature3; /* Unused (0) */ unsigned char mpf_feature4; /* Unused (0) */ unsigned char mpf_feature5; /* Unused (0) */ }; struct mp_config_table { char mpc_signature[4]; #define MPC_SIGNATURE "PCMP" unsigned short mpc_length; /* Size of table */ char mpc_spec; /* 0x01 */ char mpc_checksum; char mpc_oem[8]; char mpc_productid[12]; unsigned int mpc_oemptr; /* 0 if not present */ unsigned short mpc_oemsize; /* 0 if not present */ unsigned short mpc_oemcount; unsigned int mpc_lapic; /* APIC address */ unsigned int reserved; }; /* Followed by entries */ #define MP_PROCESSOR 0 #define MP_BUS 1 #define MP_IOAPIC 2 #define MP_INTSRC 3 #define MP_LINTSRC 4 struct mpc_config_processor { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ unsigned char mpc_cpuflag; #define CPU_ENABLED 1 /* Processor is available */ #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ unsigned int mpc_cpufeature; #define CPU_STEPPING_MASK 0x0F #define CPU_MODEL_MASK 0xF0 #define CPU_FAMILY_MASK 0xF00 unsigned int mpc_featureflag; /* CPUID feature value */ unsigned int mpc_reserved[2]; }; struct mpc_config_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. */ #define BUSTYPE_EISA "EISA" #define BUSTYPE_ISA "ISA" #define BUSTYPE_INTERN "INTERN" /* Internal BUS */ #define BUSTYPE_MCA "MCA" #define BUSTYPE_VL "VL" /* Local bus */ #define BUSTYPE_PCI "PCI" #define BUSTYPE_PCMCIA "PCMCIA" #define BUSTYPE_CBUS "CBUS" #define BUSTYPE_CBUSII "CBUSII" #define BUSTYPE_FUTURE "FUTURE" #define BUSTYPE_MBI "MBI" #define BUSTYPE_MBII "MBII" #define BUSTYPE_MPI "MPI" #define BUSTYPE_MPSA "MPSA" #define BUSTYPE_NUBUS "NUBUS" #define BUSTYPE_TC "TC" #define BUSTYPE_VME "VME" #define BUSTYPE_XPRESS "XPRESS" struct mpc_config_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; unsigned char mpc_flags; #define MPC_APIC_USABLE 0x01 unsigned int mpc_apicaddr; }; struct mpc_config_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbus; unsigned char mpc_srcbusirq; unsigned char mpc_dstapic; unsigned char mpc_dstirq; }; enum mp_irq_source_types { mp_INT = 0, mp_NMI = 1, mp_SMI = 2, mp_ExtINT = 3 }; #define MP_IRQDIR_DEFAULT 0 #define MP_IRQDIR_HIGH 1 #define MP_IRQDIR_LOW 3 struct mpc_config_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; unsigned char mpc_srcbusid; unsigned char mpc_srcbusirq; unsigned char mpc_destapic; #define MP_APIC_ALL 0xFF unsigned char mpc_destapiclint; }; /* * Default configurations * * 1 2 CPU ISA 82489DX * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining * 3 2 CPU EISA 82489DX * 4 2 CPU MCA 82489DX * 5 2 CPU ISA+PCI * 6 2 CPU EISA+PCI * 7 2 CPU MCA+PCI */ #define MAX_MP_BUSSES 256 /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. 
*/ #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) enum mp_bustype { MP_BUS_ISA = 1, MP_BUS_EISA, MP_BUS_PCI, MP_BUS_MCA }; extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES]; extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; extern int smp_found_config; extern void find_smp_config (void); extern void get_smp_config (void); extern int nr_ioapics; extern unsigned char apic_version [MAX_APICS]; extern int mp_irq_entries; extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; extern int mpc_default_type; extern unsigned long mp_lapic_addr; extern int pic_mode; #ifdef CONFIG_ACPI extern void mp_register_lapic (u8 id, u8 enabled); extern void mp_register_lapic_address (u64 address); #ifdef CONFIG_X86_IO_APIC extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs (void); extern int mp_register_gsi (u32 gsi, int triggering, int polarity); #endif /*CONFIG_X86_IO_APIC*/ #endif extern int using_apic_timer; #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) struct physid_mask { unsigned long mask[PHYSID_ARRAY_SIZE]; }; typedef struct physid_mask physid_mask_t; #define physid_set(physid, map) set_bit(physid, (map).mask) #define physid_clear(physid, map) clear_bit(physid, (map).mask) #define physid_isset(physid, map) test_bit(physid, (map).mask) #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) #define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS) #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) #define physids_coerce(map) ((map).mask[0]) #define physids_promote(physids) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ __physid_mask.mask[0] = physids; \ __physid_mask; \ }) #define physid_mask_of_physid(physid) \ ({ \ physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ physid_set(physid, __physid_mask); \ __physid_mask; \ }) #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } extern physid_mask_t phys_cpu_present_map; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/msgbuf.h000066400000000000000000000015061314037446600245630ustar00rootroot00000000000000#ifndef _X8664_MSGBUF_H #define _X8664_MSGBUF_H /* * The msqid64_ds structure for x86-64 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. 
* * Pad space is left for: * - 2 miscellaneous 64-bit values */ struct msqid64_ds { struct ipc64_perm msg_perm; __kernel_time_t msg_stime; /* last msgsnd time */ __kernel_time_t msg_rtime; /* last msgrcv time */ __kernel_time_t msg_ctime; /* last change time */ unsigned long msg_cbytes; /* current number of bytes on queue */ unsigned long msg_qnum; /* number of messages in queue */ unsigned long msg_qbytes; /* max number of bytes on queue */ __kernel_pid_t msg_lspid; /* pid of last msgsnd */ __kernel_pid_t msg_lrpid; /* last receive pid */ unsigned long __unused4; unsigned long __unused5; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/msi.h000066400000000000000000000004471314037446600240730ustar00rootroot00000000000000/* * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ #ifndef ASM_MSI_H #define ASM_MSI_H #include #include #include #define LAST_DEVICE_VECTOR 232 #define MSI_TARGET_CPU_SHIFT 12 #endif /* ASM_MSI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/msr.h000066400000000000000000000267771314037446600241220ustar00rootroot00000000000000#ifndef X86_64_MSR_H #define X86_64_MSR_H 1 #ifndef __ASSEMBLY__ /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ #define rdmsr(msr,val1,val2) \ __asm__ __volatile__("rdmsr" \ : "=a" (val1), "=d" (val2) \ : "c" (msr)) #define rdmsrl(msr,val) do { unsigned long a__,b__; \ __asm__ __volatile__("rdmsr" \ : "=a" (a__), "=d" (b__) \ : "c" (msr)); \ val = a__ | (b__<<32); \ } while(0) #define wrmsr(msr,val1,val2) \ __asm__ __volatile__("wrmsr" \ : /* no outputs */ \ : "c" (msr), "a" (val1), "d" (val2)) #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) /* wrmsr with exception handling */ #define wrmsr_safe(msr,a,b) ({ int ret__; \ asm volatile("2: wrmsr ; xorl %0,%0\n" \ "1:\n\t" \ ".section .fixup,\"ax\"\n\t" \ "3: movl %4,%0 ; jmp 1b\n\t" \ ".previous\n\t" \ ".section __ex_table,\"a\"\n" \ " .align 8\n\t" \ " .quad 2b,3b\n\t" \ ".previous" \ : "=a" (ret__) \ : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ ret__; }) #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) #define rdmsr_safe(msr,a,b) \ ({ int ret__; \ asm volatile ("1: rdmsr\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: movl %4,%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 8\n" \ " .quad 1b,3b\n" \ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\ :"c"(msr), "i"(-EIO), "0"(0)); \ ret__; }) #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) #define rdtscl(low) \ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") #define rdtscll(val) do { \ unsigned int __a,__d; \ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ } while(0) #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ : "c" (counter)) static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "0" (op)); } /* Some CPUID calls want 'count' to be placed in ecx */ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, int *edx) { __asm__("cpuid" : "=a" (*eax), "=b" (*ebx), "=c" 
(*ecx), "=d" (*edx) : "0" (op), "c" (count)); } /* * CPUID functions returning a single datum */ static inline unsigned int cpuid_eax(unsigned int op) { unsigned int eax; __asm__("cpuid" : "=a" (eax) : "0" (op) : "bx", "cx", "dx"); return eax; } static inline unsigned int cpuid_ebx(unsigned int op) { unsigned int eax, ebx; __asm__("cpuid" : "=a" (eax), "=b" (ebx) : "0" (op) : "cx", "dx" ); return ebx; } static inline unsigned int cpuid_ecx(unsigned int op) { unsigned int eax, ecx; __asm__("cpuid" : "=a" (eax), "=c" (ecx) : "0" (op) : "bx", "dx" ); return ecx; } static inline unsigned int cpuid_edx(unsigned int op) { unsigned int eax, edx; __asm__("cpuid" : "=a" (eax), "=d" (edx) : "0" (op) : "bx", "cx"); return edx; } #define MSR_IA32_UCODE_WRITE 0x79 #define MSR_IA32_UCODE_REV 0x8b #endif /* AMD/K8 specific MSRs */ #define MSR_EFER 0xc0000080 /* extended feature register */ #define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ #define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ #define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ /* EFER bits: */ #define _EFER_SCE 0 /* SYSCALL/SYSRET */ #define _EFER_LME 8 /* Long mode enable */ #define _EFER_LMA 10 /* Long mode active (read-only) */ #define _EFER_NX 11 /* No execute enable */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) #define EFER_LMA (1<<_EFER_LMA) #define EFER_NX (1<<_EFER_NX) /* Intel MSRs. Some also available on other CPUs */ #define MSR_IA32_TSC 0x10 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_PERFCTR0 0xc1 #define MSR_IA32_PERFCTR1 0xc2 #define MSR_MTRRcap 0x0fe #define MSR_IA32_BBL_CR_CTL 0x119 #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 #define MSR_IA32_SYSENTER_EIP 0x176 #define MSR_IA32_MCG_CAP 0x179 #define MSR_IA32_MCG_STATUS 0x17a #define MSR_IA32_MCG_CTL 0x17b #define MSR_IA32_EVNTSEL0 0x186 #define MSR_IA32_EVNTSEL1 0x187 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_MTRRfix64K_00000 0x250 #define MSR_MTRRfix16K_80000 0x258 #define MSR_MTRRfix16K_A0000 0x259 #define MSR_MTRRfix4K_C0000 0x268 #define MSR_MTRRfix4K_C8000 0x269 #define MSR_MTRRfix4K_D0000 0x26a #define MSR_MTRRfix4K_D8000 0x26b #define MSR_MTRRfix4K_E0000 0x26c #define MSR_MTRRfix4K_E8000 0x26d #define MSR_MTRRfix4K_F0000 0x26e #define MSR_MTRRfix4K_F8000 0x26f #define MSR_MTRRdefType 0x2ff #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 #define MSR_P6_PERFCTR0 0xc1 #define MSR_P6_PERFCTR1 0xc2 #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 /* K7/K8 MSRs. Not complete. See the architecture manual for a more complete list. 
*/ #define MSR_K7_EVNTSEL0 0xC0010000 #define MSR_K7_PERFCTR0 0xC0010004 #define MSR_K7_EVNTSEL1 0xC0010001 #define MSR_K7_PERFCTR1 0xC0010005 #define MSR_K7_EVNTSEL2 0xC0010002 #define MSR_K7_PERFCTR2 0xC0010006 #define MSR_K7_EVNTSEL3 0xC0010003 #define MSR_K7_PERFCTR3 0xC0010007 #define MSR_K8_TOP_MEM1 0xC001001A #define MSR_K8_TOP_MEM2 0xC001001D #define MSR_K8_SYSCFG 0xC0010010 #define MSR_K8_HWCR 0xC0010015 /* K6 MSRs */ #define MSR_K6_EFER 0xC0000080 #define MSR_K6_STAR 0xC0000081 #define MSR_K6_WHCR 0xC0000082 #define MSR_K6_UWCCR 0xC0000085 #define MSR_K6_PSOR 0xC0000087 #define MSR_K6_PFIR 0xC0000088 /* Centaur-Hauls/IDT defined MSRs. */ #define MSR_IDT_FCR1 0x107 #define MSR_IDT_FCR2 0x108 #define MSR_IDT_FCR3 0x109 #define MSR_IDT_FCR4 0x10a #define MSR_IDT_MCR0 0x110 #define MSR_IDT_MCR1 0x111 #define MSR_IDT_MCR2 0x112 #define MSR_IDT_MCR3 0x113 #define MSR_IDT_MCR4 0x114 #define MSR_IDT_MCR5 0x115 #define MSR_IDT_MCR6 0x116 #define MSR_IDT_MCR7 0x117 #define MSR_IDT_MCR_CTRL 0x120 /* VIA Cyrix defined MSRs*/ #define MSR_VIA_FCR 0x1107 #define MSR_VIA_LONGHAUL 0x110a #define MSR_VIA_RNG 0x110b #define MSR_VIA_BCR2 0x1147 /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0 #define MSR_IA32_P5_MC_TYPE 1 #define MSR_IA32_PLATFORM_ID 0x17 #define MSR_IA32_EBL_CR_POWERON 0x2a #define MSR_IA32_APICBASE 0x1b #define MSR_IA32_APICBASE_BSP (1<<8) #define MSR_IA32_APICBASE_ENABLE (1<<11) #define MSR_IA32_APICBASE_BASE (0xfffff<<12) /* P4/Xeon+ specific */ #define MSR_IA32_MCG_EAX 0x180 #define MSR_IA32_MCG_EBX 0x181 #define MSR_IA32_MCG_ECX 0x182 #define MSR_IA32_MCG_EDX 0x183 #define MSR_IA32_MCG_ESI 0x184 #define MSR_IA32_MCG_EDI 0x185 #define MSR_IA32_MCG_EBP 0x186 #define MSR_IA32_MCG_ESP 0x187 #define MSR_IA32_MCG_EFLAGS 0x188 #define MSR_IA32_MCG_EIP 0x189 #define MSR_IA32_MCG_RESERVED 0x18A #define MSR_P6_EVNTSEL0 0x186 #define MSR_P6_EVNTSEL1 0x187 #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_PERF_CTL 0x199 #define MSR_IA32_MPERF 0xE7 #define MSR_IA32_APERF 0xE8 #define MSR_IA32_THERM_CONTROL 0x19a #define MSR_IA32_THERM_INTERRUPT 0x19b #define MSR_IA32_THERM_STATUS 0x19c #define MSR_IA32_MISC_ENABLE 0x1a0 #define MSR_IA32_DEBUGCTLMSR 0x1d9 #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd #define MSR_IA32_LASTINTTOIP 0x1de #define MSR_IA32_MC0_CTL 0x400 #define MSR_IA32_MC0_STATUS 0x401 #define MSR_IA32_MC0_ADDR 0x402 #define MSR_IA32_MC0_MISC 0x403 /* Pentium IV performance counter MSRs */ #define MSR_P4_BPU_PERFCTR0 0x300 #define MSR_P4_BPU_PERFCTR1 0x301 #define MSR_P4_BPU_PERFCTR2 0x302 #define MSR_P4_BPU_PERFCTR3 0x303 #define MSR_P4_MS_PERFCTR0 0x304 #define MSR_P4_MS_PERFCTR1 0x305 #define MSR_P4_MS_PERFCTR2 0x306 #define MSR_P4_MS_PERFCTR3 0x307 #define MSR_P4_FLAME_PERFCTR0 0x308 #define MSR_P4_FLAME_PERFCTR1 0x309 #define MSR_P4_FLAME_PERFCTR2 0x30a #define MSR_P4_FLAME_PERFCTR3 0x30b #define MSR_P4_IQ_PERFCTR0 0x30c #define MSR_P4_IQ_PERFCTR1 0x30d #define MSR_P4_IQ_PERFCTR2 0x30e #define MSR_P4_IQ_PERFCTR3 0x30f #define MSR_P4_IQ_PERFCTR4 0x310 #define MSR_P4_IQ_PERFCTR5 0x311 #define MSR_P4_BPU_CCCR0 0x360 #define MSR_P4_BPU_CCCR1 0x361 #define MSR_P4_BPU_CCCR2 0x362 #define MSR_P4_BPU_CCCR3 0x363 #define MSR_P4_MS_CCCR0 0x364 #define MSR_P4_MS_CCCR1 0x365 #define MSR_P4_MS_CCCR2 0x366 #define MSR_P4_MS_CCCR3 0x367 #define MSR_P4_FLAME_CCCR0 0x368 #define MSR_P4_FLAME_CCCR1 0x369 #define MSR_P4_FLAME_CCCR2 0x36a #define MSR_P4_FLAME_CCCR3 0x36b #define MSR_P4_IQ_CCCR0 0x36c #define 
MSR_P4_IQ_CCCR1 0x36d #define MSR_P4_IQ_CCCR2 0x36e #define MSR_P4_IQ_CCCR3 0x36f #define MSR_P4_IQ_CCCR4 0x370 #define MSR_P4_IQ_CCCR5 0x371 #define MSR_P4_ALF_ESCR0 0x3ca #define MSR_P4_ALF_ESCR1 0x3cb #define MSR_P4_BPU_ESCR0 0x3b2 #define MSR_P4_BPU_ESCR1 0x3b3 #define MSR_P4_BSU_ESCR0 0x3a0 #define MSR_P4_BSU_ESCR1 0x3a1 #define MSR_P4_CRU_ESCR0 0x3b8 #define MSR_P4_CRU_ESCR1 0x3b9 #define MSR_P4_CRU_ESCR2 0x3cc #define MSR_P4_CRU_ESCR3 0x3cd #define MSR_P4_CRU_ESCR4 0x3e0 #define MSR_P4_CRU_ESCR5 0x3e1 #define MSR_P4_DAC_ESCR0 0x3a8 #define MSR_P4_DAC_ESCR1 0x3a9 #define MSR_P4_FIRM_ESCR0 0x3a4 #define MSR_P4_FIRM_ESCR1 0x3a5 #define MSR_P4_FLAME_ESCR0 0x3a6 #define MSR_P4_FLAME_ESCR1 0x3a7 #define MSR_P4_FSB_ESCR0 0x3a2 #define MSR_P4_FSB_ESCR1 0x3a3 #define MSR_P4_IQ_ESCR0 0x3ba #define MSR_P4_IQ_ESCR1 0x3bb #define MSR_P4_IS_ESCR0 0x3b4 #define MSR_P4_IS_ESCR1 0x3b5 #define MSR_P4_ITLB_ESCR0 0x3b6 #define MSR_P4_ITLB_ESCR1 0x3b7 #define MSR_P4_IX_ESCR0 0x3c8 #define MSR_P4_IX_ESCR1 0x3c9 #define MSR_P4_MOB_ESCR0 0x3aa #define MSR_P4_MOB_ESCR1 0x3ab #define MSR_P4_MS_ESCR0 0x3c0 #define MSR_P4_MS_ESCR1 0x3c1 #define MSR_P4_PMH_ESCR0 0x3ac #define MSR_P4_PMH_ESCR1 0x3ad #define MSR_P4_RAT_ESCR0 0x3bc #define MSR_P4_RAT_ESCR1 0x3bd #define MSR_P4_SAAT_ESCR0 0x3ae #define MSR_P4_SAAT_ESCR1 0x3af #define MSR_P4_SSU_ESCR0 0x3be #define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ #define MSR_P4_TBPU_ESCR0 0x3c2 #define MSR_P4_TBPU_ESCR1 0x3c3 #define MSR_P4_TC_ESCR0 0x3c4 #define MSR_P4_TC_ESCR1 0x3c5 #define MSR_P4_U2L_ESCR0 0x3b0 #define MSR_P4_U2L_ESCR1 0x3b1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mtrr.h000066400000000000000000000121401314037446600242600ustar00rootroot00000000000000/* Generic MTRR (Memory Type Range Register) ioctls. Copyright (C) 1997-1999 Richard Gooch This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Richard Gooch may be reached by email at rgooch@atnf.csiro.au The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. */ #ifndef _LINUX_MTRR_H #define _LINUX_MTRR_H #include #include #include #define MTRR_IOCTL_BASE 'M' struct mtrr_sentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int type; /* Type of region */ }; /* Warning: this structure has a different order from i386 on x86-64. The 32bit emulation code takes care of that. But you need to use this for 64bit, otherwise your X server will break. 
*/ struct mtrr_gentry { unsigned long base; /* Base address */ unsigned int size; /* Size of region */ unsigned int regnum; /* Register number */ unsigned int type; /* Type of region */ }; /* These are the various ioctls */ #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) /* These are the region types */ #define MTRR_TYPE_UNCACHABLE 0 #define MTRR_TYPE_WRCOMB 1 /*#define MTRR_TYPE_ 2*/ /*#define MTRR_TYPE_ 3*/ #define MTRR_TYPE_WRTHROUGH 4 #define MTRR_TYPE_WRPROT 5 #define MTRR_TYPE_WRBACK 6 #define MTRR_NUM_TYPES 7 #ifdef __KERNEL__ /* The following functions are for use by other drivers */ # ifdef CONFIG_MTRR extern int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment); extern int mtrr_del (int reg, unsigned long base, unsigned long size); extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); # else static __inline__ int mtrr_add (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, unsigned int type, char increment) { return -ENODEV; } static __inline__ int mtrr_del (int reg, unsigned long base, unsigned long size) { return -ENODEV; } static __inline__ int mtrr_del_page (int reg, unsigned long base, unsigned long size) { return -ENODEV; } # endif #endif #ifdef CONFIG_COMPAT struct mtrr_sentry32 { compat_ulong_t base; /* Base address */ compat_uint_t size; /* Size of region */ compat_uint_t type; /* Type of region */ }; struct mtrr_gentry32 { compat_ulong_t regnum; /* Register number */ compat_uint_t base; /* Base address */ compat_uint_t size; /* Size of region */ compat_uint_t type; /* Type of region */ }; #define MTRR_IOCTL_BASE 'M' #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) #define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) #endif /* CONFIG_COMPAT */ #endif /* _LINUX_MTRR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/mutex.h000066400000000000000000000060711314037446600244440ustar00rootroot00000000000000/* * Assembly implementation of the 
mutex fastpath, based on atomic * decrement/increment. * * started by Ingo Molnar: * * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H /** * __mutex_fastpath_lock - decrement and call function if negative * @v: pointer of type atomic_t * @fail_fn: function to call if the result is negative * * Atomically decrements @v and calls if the result is negative. */ #define __mutex_fastpath_lock(v, fail_fn) \ do { \ unsigned long dummy; \ \ typecheck(atomic_t *, v); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK " decl (%%rdi) \n" \ " js 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=D" (dummy) \ : "D" (v) \ : "rax", "rsi", "rdx", "rcx", \ "r8", "r9", "r10", "r11", "memory"); \ } while (0) /** * __mutex_fastpath_lock_retval - try to take the lock by moving the count * from 1 to a 0 value * @count: pointer of type atomic_t * @fail_fn: function to call if the original value was not 1 * * Change the count from 1 to a value lower than 1, and call if * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, * or anything the slow path function returns */ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int fastcall (*fail_fn)(atomic_t *)) { if (unlikely(atomic_dec_return(count) < 0)) return fail_fn(count); else return 0; } /** * __mutex_fastpath_unlock - increment and call function if nonpositive * @v: pointer of type atomic_t * @fail_fn: function to call if the result is nonpositive * * Atomically increments @v and calls if the result is nonpositive. */ #define __mutex_fastpath_unlock(v, fail_fn) \ do { \ unsigned long dummy; \ \ typecheck(atomic_t *, v); \ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ \ __asm__ __volatile__( \ LOCK " incl (%%rdi) \n" \ " jle 2f \n" \ "1: \n" \ \ LOCK_SECTION_START("") \ "2: call "#fail_fn" \n" \ " jmp 1b \n" \ LOCK_SECTION_END \ \ :"=D" (dummy) \ : "D" (v) \ : "rax", "rsi", "rdx", "rcx", \ "r8", "r9", "r10", "r11", "memory"); \ } while (0) #define __mutex_slowpath_needs_to_unlock() 1 /** * __mutex_fastpath_trylock - try to acquire the mutex, without waiting * * @count: pointer of type atomic_t * @fail_fn: fallback function * * Change the count from 1 to 0 and return 1 (success), or return 0 (failure) * if it wasn't 1 originally. [the fallback function is never used on * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] */ static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { if (likely(atomic_cmpxchg(count, 1, 0) == 1)) return 1; else return 0; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/namei.h000066400000000000000000000003421314037446600243660ustar00rootroot00000000000000#ifndef __X8664_NAMEI_H #define __X8664_NAMEI_H /* This dummy routine maybe changed to something useful * for /usr/gnemul/ emulation stuff. * Look at asm-sparc/namei.h for details. */ #define __emul_prefix() NULL #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/nmi.h000066400000000000000000000021741314037446600240650ustar00rootroot00000000000000/* * linux/include/asm-i386/nmi.h */ #ifndef ASM_NMI_H #define ASM_NMI_H #include struct pt_regs; typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); /** * set_nmi_callback * * Set a handler for an NMI. Only one handler may be * set. Return 1 if the NMI was handled. 
*/ void set_nmi_callback(nmi_callback_t callback); /** * unset_nmi_callback * * Remove the handler previously set. */ void unset_nmi_callback(void); #ifdef CONFIG_PM /** Replace the PM callback routine for NMI. */ struct pm_dev * set_nmi_pm_callback(pm_callback callback); /** Unset the PM callback routine back to the default. */ void unset_nmi_pm_callback(struct pm_dev * dev); #else static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) { return 0; } static inline void unset_nmi_pm_callback(struct pm_dev * dev) { } #endif /* CONFIG_PM */ extern void default_do_nmi(struct pt_regs *); extern void die_nmi(char *str, struct pt_regs *regs); #define get_nmi_reason() inb(0x61) extern int panic_on_timeout; extern int unknown_nmi_panic; extern int check_nmi_watchdog(void); #endif /* ASM_NMI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/node.h000066400000000000000000000000331314037446600242170ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/numa.h000066400000000000000000000015531314037446600242420ustar00rootroot00000000000000#ifndef _ASM_X8664_NUMA_H #define _ASM_X8664_NUMA_H 1 #include #include struct bootnode { u64 start,end; }; extern int compute_hash_shift(struct bootnode *nodes, int numnodes); extern int pxm_to_node(int nid); #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) extern void numa_add_cpu(int cpu); extern void numa_init_array(void); extern int numa_off; extern void numa_set_node(int cpu, int node); extern void srat_reserve_add_area(int nodeid); extern unsigned long hotadd_allow; extern unsigned char apicid_to_node[256]; #ifdef CONFIG_NUMA extern void __init init_cpu_to_node(void); static inline void clear_node_cpumask(int cpu) { clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); } #else #define init_cpu_to_node() do {} while (0) #define clear_node_cpumask(cpu) do {} while (0) #endif #define NUMA_NO_NODE 0xff #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/numnodes.h000066400000000000000000000002561314037446600251310ustar00rootroot00000000000000#ifndef _ASM_X8664_NUMNODES_H #define _ASM_X8664_NUMNODES_H 1 #include #ifdef CONFIG_NUMA #define NODES_SHIFT 6 #else #define NODES_SHIFT 0 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/page.h000066400000000000000000000107141314037446600242150ustar00rootroot00000000000000#ifndef _X86_64_PAGE_H #define _X86_64_PAGE_H #include /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #ifdef __ASSEMBLY__ #define PAGE_SIZE (0x1 << PAGE_SHIFT) #else #define PAGE_SIZE (1UL << PAGE_SHIFT) #endif #define PAGE_MASK (~(PAGE_SIZE-1)) #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define CURRENT_MASK (~(THREAD_SIZE-1)) #define EXCEPTION_STACK_ORDER 0 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) #define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) #define IRQSTACK_ORDER 2 #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) #define STACKFAULT_STACK 1 #define DOUBLEFAULT_STACK 2 #define NMI_STACK 3 #define DEBUG_STACK 4 #define MCE_STACK 5 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) #define HPAGE_SHIFT PMD_SHIFT #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #ifdef __KERNEL__ #ifndef 
__ASSEMBLY__ extern unsigned long end_pfn; void clear_page(void *); void copy_page(void *, void *); #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE /* * These are used to make use of C type-checking.. */ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pud; } pud_t; typedef struct { unsigned long pgd; } pgd_t; #define PTE_MASK PHYSICAL_PAGE_MASK typedef struct { unsigned long pgprot; } pgprot_t; #define pte_val(x) ((x).pte) #define pmd_val(x) ((x).pmd) #define pud_val(x) ((x).pud) #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) #define __pmd(x) ((pmd_t) { (x) } ) #define __pud(x) ((pud_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000UL #define __PAGE_OFFSET 0xffff810000000000UL #else #define __PHYSICAL_START CONFIG_PHYSICAL_START #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map 0xffffffff80000000 #define __PAGE_OFFSET 0xffff810000000000 #endif /* !__ASSEMBLY__ */ /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* See Documentation/x86_64/mm.txt for a description of the memory map. */ #define __PHYSICAL_MASK_SHIFT 46 #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) #define KERNEL_TEXT_SIZE (40UL*1024*1024) #define KERNEL_TEXT_START 0xffffffff80000000UL #ifndef __ASSEMBLY__ #include #endif /* __ASSEMBLY__ */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. Otherwise you risk miscompilation. */ #define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET) /* __pa_symbol should be used for C visible symbols. This seems to be the official gcc blessed way to do such arithmetic. */ #define __pa_symbol(x) \ ({unsigned long v; \ asm("" : "=r" (v) : "0" (x)); \ __pa(v); }) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define __boot_va(x) __va(x) #define __boot_pa(x) __pa(x) #ifdef CONFIG_FLATMEM #define pfn_to_page(pfn) (mem_map + (pfn)) #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) #define pfn_valid(pfn) ((pfn) < end_pfn) #endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) #define VM_DATA_DEFAULT_FLAGS \ (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0 ) | \ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define __HAVE_ARCH_GATE_AREA 1 #endif /* __KERNEL__ */ #include #endif /* _X86_64_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/param.h000066400000000000000000000007471314037446600244060ustar00rootroot00000000000000#ifndef _ASMx86_64_PARAM_H #define _ASMx86_64_PARAM_H #ifdef __KERNEL__ # include # define HZ CONFIG_HZ /* Internal kernel timer frequency */ # define USER_HZ 100 /* .. some user interfaces are in "ticks */ #define CLOCKS_PER_SEC (USER_HZ) /* like times() */ #endif #ifndef HZ #define HZ 100 #endif #define EXEC_PAGESIZE 4096 #ifndef NOGROUP #define NOGROUP (-1) #endif #define MAXHOSTNAMELEN 64 /* max length of hostname */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/parport.h000066400000000000000000000007411314037446600247670ustar00rootroot00000000000000/* * parport.h: ia32-specific parport initialisation * * Copyright (C) 1999, 2000 Tim Waugh * * This file should only be included by drivers/parport/parport_pc.c. */ #ifndef _ASM_X8664_PARPORT_H #define _ASM_X8664_PARPORT_H 1 static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) { return parport_pc_find_isa_ports (autoirq, autodma); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/pci-direct.h000066400000000000000000000023631314037446600253250ustar00rootroot00000000000000#ifndef ASM_PCI_DIRECT_H #define ASM_PCI_DIRECT_H 1 #include #include /* Direct PCI access. This is used for PCI accesses in early boot before the PCI subsystem works. */ #define PDprintk(x...) static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) { u32 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inl(0xcfc); if (v != 0xffffffff) PDprintk("%x reading 4 from %x: %x\n", slot, offset, v); return v; } static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) { u8 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inb(0xcfc + (offset&3)); PDprintk("%x reading 1 from %x: %x\n", slot, offset, v); return v; } static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) { u16 v; outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); v = inw(0xcfc + (offset&2)); PDprintk("%x reading 2 from %x: %x\n", slot, offset, v); return v; } static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val) { PDprintk("%x writing to %x: %x\n", slot, offset, val); outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); outl(val, 0xcfc); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/pci.h000066400000000000000000000102401314037446600240460ustar00rootroot00000000000000#ifndef __x8664_PCI_H #define __x8664_PCI_H #include #include #ifdef __KERNEL__ #include /* for struct page */ /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ #ifdef CONFIG_PCI extern unsigned int pcibios_assign_all_busses(void); #else #define pcibios_assign_all_busses() 0 #endif #define pcibios_scan_all_fns(a, b) 0 extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM (pci_mem_start) #define PCIBIOS_MIN_CARDBUS_IO 0x4000 void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); extern int (*pci_config_read)(int 
seg, int bus, int dev, int fn, int reg, int len, u32 *value); extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); void pcibios_set_master(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); #include #include #include #include #include #include /* for have_iommu */ extern void pci_iommu_alloc(void); extern int iommu_setup(char *opt); /* The PCI address space does equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions * * On AMD64 it mostly equals, but we set it to zero if a hardware * IOMMU (gart) of sotware IOMMU (swiotlb) is available. */ #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) #if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU) /* * x86-64 always supports DAC, but sometimes it is useful to force * devices through the IOMMU to get automatic sg list merging. * Optional right now. */ extern int iommu_sac_force; #define pci_dac_dma_supported(pci_dev, mask) (!iommu_sac_force) #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ __u32 LEN_NAME; #define pci_unmap_addr(PTR, ADDR_NAME) \ ((PTR)->ADDR_NAME) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ (((PTR)->ADDR_NAME) = (VAL)) #define pci_unmap_len(PTR, LEN_NAME) \ ((PTR)->LEN_NAME) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) #else /* No IOMMU */ #define pci_dac_dma_supported(pci_dev, mask) 1 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) #define pci_unmap_addr(PTR, ADDR_NAME) (0) #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define pci_unmap_len(PTR, LEN_NAME) (0) #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif #include static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) { return ((dma64_addr_t) page_to_phys(page) + (dma64_addr_t) offset); } static inline struct page * pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) { return virt_to_page(__va(dma_addr)); } static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) { return (dma_addr & ~PAGE_MASK); } static inline void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { } static inline void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) { flush_write_buffers(); } #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) { *strat = PCI_DMA_BURST_INFINITY; *strategy_parameter = ~0UL; } #endif #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); static inline void pcibios_add_platform_entries(struct pci_dev *dev) { } #endif /* __KERNEL__ */ /* generic pci stuff */ #ifdef CONFIG_PCI #include #endif #endif /* __x8664_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/pda.h000066400000000000000000000057221314037446600240500ustar00rootroot00000000000000#ifndef X86_64_PDA_H #define X86_64_PDA_H #ifndef __ASSEMBLY__ #include #include #include #include /* Per processor datastructure. 
%gs points to it while the kernel runs */ struct x8664_pda { struct task_struct *pcurrent; /* Current process */ unsigned long data_offset; /* Per cpu data offset from linker address */ unsigned long kernelstack; /* top of kernel stack for current */ unsigned long oldrsp; /* user rsp for system call */ #if DEBUG_STKSZ > EXCEPTION_STKSZ unsigned long debugstack; /* #DB/#BP stack. */ #endif int irqcount; /* Irq nesting counter. Starts with -1 */ int cpunumber; /* Logical CPU number */ char *irqstackptr; /* top of irqstack */ int nodenumber; /* number of current node */ unsigned int __softirq_pending; unsigned int __nmi_count; /* number of NMI on this CPUs */ struct mm_struct *active_mm; int mmu_state; unsigned apic_timer_irqs; } ____cacheline_aligned_in_smp; extern struct x8664_pda *_cpu_pda[]; extern struct x8664_pda boot_cpu_pda[]; #define cpu_pda(i) (_cpu_pda[i]) /* * There is no fast way to get the base address of the PDA, all the accesses * have to mention %fs/%gs. So it needs to be done this Torvaldian way. */ #define sizeof_field(type,field) (sizeof(((type *)0)->field)) #define typeof_field(type,field) typeof(((type *)0)->field) extern void __bad_pda_field(void); #define pda_offset(field) offsetof(struct x8664_pda, field) #define pda_to_op(op,field,val) do { \ typedef typeof_field(struct x8664_pda, field) T__; \ switch (sizeof_field(struct x8664_pda, field)) { \ case 2: \ asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ case 4: \ asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ case 8: \ asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ default: __bad_pda_field(); \ } \ } while (0) /* * AK: PDA read accesses should be neither volatile nor have an memory clobber. * Unfortunately removing them causes all hell to break lose currently. */ #define pda_from_op(op,field) ({ \ typeof_field(struct x8664_pda, field) ret__; \ switch (sizeof_field(struct x8664_pda, field)) { \ case 2: \ asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ case 4: \ asm volatile(op "l %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ case 8: \ asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ default: __bad_pda_field(); \ } \ ret__; }) #define read_pda(field) pda_from_op("mov",field) #define write_pda(field,val) pda_to_op("mov",field,val) #define add_pda(field,val) pda_to_op("add",field,val) #define sub_pda(field,val) pda_to_op("sub",field,val) #define or_pda(field,val) pda_to_op("or",field,val) #endif #define PDA_STACKOFFSET (5*8) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/percpu.h000066400000000000000000000031571314037446600246020ustar00rootroot00000000000000#ifndef _ASM_X8664_PERCPU_H_ #define _ASM_X8664_PERCPU_H_ #include /* Same as asm-generic/percpu.h, except that we store the per cpu offset in the PDA. Longer term the PDA and every per cpu variable should be just put into a single section and referenced directly from %gs */ #ifdef CONFIG_SMP #include #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) #define __my_cpu_offset() read_pda(data_offset) /* Separate out the type, so (int[3], foo) works. 
*/ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name /* var is in discarded region: offset to particular copy we want */ #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) /* A macro to avoid #include hell... */ #define percpu_modcopy(pcpudst, src, size) \ do { \ unsigned int __i; \ for (__i = 0; __i < NR_CPUS; __i++) \ if (cpu_possible(__i)) \ memcpy((pcpudst)+__per_cpu_offset(__i), \ (src), (size)); \ } while (0) extern void setup_per_cpu_areas(void); #else /* ! SMP */ #define DEFINE_PER_CPU(type, name) \ __typeof__(type) per_cpu__##name #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) #define __get_cpu_var(var) per_cpu__##var #endif /* SMP */ #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) #endif /* _ASM_X8664_PERCPU_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/pgalloc.h000066400000000000000000000052511314037446600247220ustar00rootroot00000000000000#ifndef _X86_64_PGALLOC_H #define _X86_64_PGALLOC_H #include #include #include #include #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) #define pud_populate(mm, pud, pmd) \ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))) #define pgd_populate(mm, pgd, pud) \ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))) static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); } static inline pmd_t *get_pmd(void) { return (pmd_t *)get_zeroed_page(GFP_KERNEL); } static inline void pmd_free(pmd_t *pmd) { BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); free_page((unsigned long)pmd); } static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) { return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); } static inline void pud_free (pud_t *pud) { BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); free_page((unsigned long)pud); } static inline pgd_t *pgd_alloc(struct mm_struct *mm) { unsigned boundary; pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); if (!pgd) return NULL; /* * Copy kernel pointers in from init. * Could keep a freelist or slab cache of those because the kernel * part never changes. */ boundary = pgd_index(__PAGE_OFFSET); memset(pgd, 0, boundary * sizeof(pgd_t)); memcpy(pgd + boundary, init_level4_pgt + boundary, (PTRS_PER_PGD - boundary) * sizeof(pgd_t)); return pgd; } static inline void pgd_free(pgd_t *pgd) { BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); free_page((unsigned long)pgd); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); } static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) { void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); if (!p) return NULL; return virt_to_page(p); } /* Should really implement gc for free page table pages. This could be done with a reference count in struct page. 
*/ static inline void pte_free_kernel(pte_t *pte) { BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); free_page((unsigned long)pte); } static inline void pte_free(struct page *pte) { __free_page(pte); } #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) #define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x)) #endif /* _X86_64_PGALLOC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/pgtable.h000066400000000000000000000365171314037446600247300ustar00rootroot00000000000000#ifndef _X86_64_PGTABLE_H #define _X86_64_PGTABLE_H /* * This file contains the functions and defines necessary to modify and use * the x86-64 page table tree. */ #include #include #include #include #include extern pud_t level3_kernel_pgt[512]; extern pud_t level3_physmem_pgt[512]; extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pgd_t init_level4_pgt[]; extern pgd_t boot_level4_pgt[]; extern unsigned long __supported_pte_mask; #define swapper_pg_dir init_level4_pgt extern int nonx_setup(char *str); extern void paging_init(void); extern void clear_kernel_mapping(unsigned long addr, unsigned long size); extern unsigned long pgkern_mask; /* * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) /* * PGDIR_SHIFT determines what a top-level page table entry can map */ #define PGDIR_SHIFT 39 #define PTRS_PER_PGD 512 /* * 3rd level page */ #define PUD_SHIFT 30 #define PTRS_PER_PUD 512 /* * PMD_SHIFT determines the size of the area a middle-level * page table can map */ #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 /* * entries per page directory level */ #define PTRS_PER_PTE 512 #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) #define pmd_ERROR(e) \ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) #define pud_ERROR(e) \ printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e)) #define pgd_ERROR(e) \ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) #define pgd_none(x) (!pgd_val(x)) #define pud_none(x) (!pud_val(x)) static inline void set_pte(pte_t *dst, pte_t val) { pte_val(*dst) = pte_val(val); } #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) static inline void set_pmd(pmd_t *dst, pmd_t val) { pmd_val(*dst) = pmd_val(val); } static inline void set_pud(pud_t *dst, pud_t val) { pud_val(*dst) = pud_val(val); } static inline void pud_clear (pud_t *pud) { set_pud(pud, __pud(0)); } static inline void set_pgd(pgd_t *dst, pgd_t val) { pgd_val(*dst) = pgd_val(val); } static inline void pgd_clear (pgd_t * pgd) { set_pgd(pgd, __pgd(0)); } #define pud_page(pud) \ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) struct mm_struct; static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) { pte_t pte; if (full) { pte = *ptep; *ptep = __pte(0); } else { pte = ptep_get_and_clear(mm, addr, ptep); } return pte; } #define pte_same(a, b) ((a).pte == (b).pte) #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SIZE (1UL << 
PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) #define FIRST_USER_ADDRESS 0 #ifndef __ASSEMBLY__ #define MAXMEM 0x3fffffffffffUL #define VMALLOC_START 0xffffc20000000000UL #define VMALLOC_END 0xffffe1ffffffffffUL #define MODULES_VADDR 0xffffffff88000000UL #define MODULES_END 0xfffffffffff00000UL #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define _PAGE_BIT_PRESENT 0 #define _PAGE_BIT_RW 1 #define _PAGE_BIT_USER 2 #define _PAGE_BIT_PWT 3 #define _PAGE_BIT_PCD 4 #define _PAGE_BIT_ACCESSED 5 #define _PAGE_BIT_DIRTY 6 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 2MB page */ #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ #define _PAGE_PROTNONE 0x080 /* If not present */ #define _PAGE_NX (1UL<<_PAGE_BIT_NX) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_COPY PAGE_COPY_NOEXEC #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define __PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define __PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) #define __PAGE_KERNEL_NOCACHE \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX) #define __PAGE_KERNEL_RO \ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) #define __PAGE_KERNEL_VSYSCALL \ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define __PAGE_KERNEL_VSYSCALL_NOCACHE \ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD) #define __PAGE_KERNEL_LARGE \ (__PAGE_KERNEL | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC \ (__PAGE_KERNEL_EXEC | _PAGE_PSE) #define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) #define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL) #define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) #define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) #define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) /* xwr */ #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY #define __P100 PAGE_READONLY_EXEC #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC #define __S000 PAGE_NONE #define __S001 
PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED #define __S100 PAGE_READONLY_EXEC #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC static inline unsigned long pgd_bad(pgd_t pgd) { unsigned long val = pgd_val(pgd); val &= ~PTE_MASK; val &= ~(_PAGE_USER | _PAGE_DIRTY); return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); } static inline unsigned long pud_bad(pud_t pud) { unsigned long val = pud_val(pud); val &= ~PTE_MASK; val &= ~(_PAGE_USER | _PAGE_DIRTY); return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); } #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte_val(pte) = (page_nr << PAGE_SHIFT); pte_val(pte) |= pgprot_val(pgprot); pte_val(pte) &= __supported_pte_mask; return pte; } /* * The following only work if pte_present() is true. * Undefined behaviour if not.. */ #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; } static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; } static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; } struct vm_area_struct; static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_dirty(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); } static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { if (!pte_young(*ptep)) return 0; return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); 
} static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { clear_bit(_PAGE_BIT_RW, ptep); } /* * Macro to mark a page protection value as "uncacheable". */ #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) static inline int pmd_large(pmd_t pte) { return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; } /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. */ /* * Level 4 access. */ #define pgd_page(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) /* PUD - Level3 access */ /* to find an entry in a page-table-directory. */ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address)) #define pud_offset_k(pgd, addr) pud_offset(pgd, addr) #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address) { return pud + pud_index(address); } /* PMD - Level 2 access */ #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \ pmd_index(address)) #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) #define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE ) #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT /* PTE - Level 1 access. */ /* page, protection -> pte */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE) /* physical address -> PTE */ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) { pte_t pte; pte_val(pte) = physpage | pgprot_val(pgprot); return pte; } /* Change flags of a PTE */ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) &= _PAGE_CHG_MASK; pte_val(pte) |= pgprot_val(newprot); pte_val(pte) &= __supported_pte_mask; return pte; } #define pte_index(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \ pte_index(address)) /* x86-64 always has all page tables mapped. 
*/ #define pte_offset_map(dir,address) pte_offset_kernel(dir,address) #define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address) #define pte_unmap(pte) /* NOP */ #define pte_unmap_nested(pte) /* NOP */ #define update_mmu_cache(vma,address,pte) do { } while (0) /* We only update the dirty/accessed state if we set * the dirty bit by hand in the kernel, since the hardware * will do the accessed bit for us, and we don't want to * race with other CPU's that might be updating the dirty * bit at the same time. */ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ do { \ if (__dirty) { \ set_pte(__ptep, __entry); \ flush_tlb_page(__vma, __address); \ } \ } while (0) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 1) & 0x3f) #define __swp_offset(x) ((x).val >> 8) #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #endif /* !__ASSEMBLY__ */ extern int kern_addr_valid(unsigned long addr); #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) #define HAVE_ARCH_UNMAPPED_AREA #define pgtable_cache_init() do { } while (0) #define check_pgt_cache() do { } while (0) #define PAGE_AGP PAGE_KERNEL_NOCACHE #define HAVE_PAGE_AGP 1 /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) #define kc_offset_to_vaddr(o) \ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME #include #endif /* _X86_64_PGTABLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/poll.h000066400000000000000000000010021314037446600242350ustar00rootroot00000000000000#ifndef __x86_64_POLL_H #define __x86_64_POLL_H /* These are specified by iBCS2 */ #define POLLIN 0x0001 #define POLLPRI 0x0002 #define POLLOUT 0x0004 #define POLLERR 0x0008 #define POLLHUP 0x0010 #define POLLNVAL 0x0020 /* The rest seem to be more-or-less nonstandard. Check them! */ #define POLLRDNORM 0x0040 #define POLLRDBAND 0x0080 #define POLLWRNORM 0x0100 #define POLLWRBAND 0x0200 #define POLLMSG 0x0400 #define POLLREMOVE 0x1000 struct pollfd { int fd; short events; short revents; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/posix_types.h000066400000000000000000000063711314037446600256730ustar00rootroot00000000000000#ifndef _ASM_X86_64_POSIX_TYPES_H #define _ASM_X86_64_POSIX_TYPES_H /* * This file is generally used by user-level software, so you need to * be a little careful about namespace pollution etc. Also, we cannot * assume GCC is being used. 
*/ typedef unsigned long __kernel_ino_t; typedef unsigned int __kernel_mode_t; typedef unsigned long __kernel_nlink_t; typedef long __kernel_off_t; typedef int __kernel_pid_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; typedef unsigned long __kernel_size_t; typedef long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef long __kernel_time_t; typedef long __kernel_suseconds_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef int __kernel_daddr_t; typedef char * __kernel_caddr_t; typedef unsigned short __kernel_uid16_t; typedef unsigned short __kernel_gid16_t; #ifdef __GNUC__ typedef long long __kernel_loff_t; #endif typedef struct { int val[2]; } __kernel_fsid_t; typedef unsigned short __kernel_old_uid_t; typedef unsigned short __kernel_old_gid_t; typedef __kernel_uid_t __kernel_uid32_t; typedef __kernel_gid_t __kernel_gid32_t; typedef unsigned long __kernel_old_dev_t; #ifdef __KERNEL__ #undef __FD_SET static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; fdsetp->fds_bits[_tmp] |= (1UL<<_rem); } #undef __FD_CLR static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); } #undef __FD_ISSET static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) { unsigned long _tmp = fd / __NFDBITS; unsigned long _rem = fd % __NFDBITS; return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; } /* * This will unroll the loop for the normal constant cases (8 or 32 longs, * for 256 and 1024-bit fd_sets respectively) */ #undef __FD_ZERO static __inline__ void __FD_ZERO(__kernel_fd_set *p) { unsigned long *tmp = p->fds_bits; int i; if (__builtin_constant_p(__FDSET_LONGS)) { switch (__FDSET_LONGS) { case 32: tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; return; case 16: tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; return; case 8: tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; return; case 4: tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; return; } } i = __FDSET_LONGS; while (i) { i--; *tmp = 0; tmp++; } } #endif /* defined(__KERNEL__) */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/prctl.h000066400000000000000000000002461314037446600244240ustar00rootroot00000000000000#ifndef X86_64_PRCTL_H #define X86_64_PRCTL_H 1 #define ARCH_SET_GS 0x1001 #define ARCH_SET_FS 0x1002 #define ARCH_GET_FS 0x1003 #define ARCH_GET_GS 0x1004 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/processor.h000066400000000000000000000333051314037446600253210ustar00rootroot00000000000000/* * include/asm-x86_64/processor.h * * Copyright (C) 1994 Linus Torvalds */ #ifndef __ASM_X86_64_PROCESSOR_H #define __ASM_X86_64_PROCESSOR_H #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #define TF_MASK 0x00000100 #define IF_MASK 0x00000200 #define IOPL_MASK 0x00003000 #define NT_MASK 0x00004000 #define VM_MASK 0x00020000 #define AC_MASK 0x00040000 #define VIF_MASK 0x00080000 /* virtual interrupt flag */ #define VIP_MASK 0x00100000 /* virtual interrupt pending */ #define ID_MASK 0x00200000 #define desc_empty(desc) \ (!((desc)->a | (desc)->b)) #define desc_equal(desc1, desc2) \ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) /* * Default implementation of macro that returns current * instruction pointer ("program counter"). */ #define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; }) /* * CPU type and hardware bug flags. Kept separately for each CPU. */ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; __u8 x86_mask; int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ __u32 x86_capability[NCAPINTS]; char x86_vendor_id[16]; char x86_model_id[64]; int x86_cache_size; /* in KB */ int x86_clflush_size; int x86_cache_alignment; int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/ __u8 x86_virt_bits, x86_phys_bits; __u8 x86_max_cores; /* cpuid returned max cores value */ __u32 x86_power; __u32 extended_cpuid_level; /* Max extended CPUID function supported */ unsigned long loops_per_jiffy; #ifdef CONFIG_SMP cpumask_t llc_shared_map; /* cpus sharing the last level cache */ #endif __u8 apicid; __u8 booted_cores; /* number of cores as seen by OS */ __u8 x86_hyper_vendor; } ____cacheline_aligned; #define X86_VENDOR_INTEL 0 #define X86_VENDOR_CYRIX 1 #define X86_VENDOR_AMD 2 #define X86_VENDOR_UMC 3 #define X86_VENDOR_NEXGEN 4 #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_RISE 6 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NUM 8 #define X86_VENDOR_UNKNOWN 0xff #define X86_HYPER_VENDOR_NONE 0 #define X86_HYPER_VENDOR_VMWARE 1 #ifdef CONFIG_SMP extern struct cpuinfo_x86 cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #else #define cpu_data (&boot_cpu_data) #define current_cpu_data boot_cpu_data #endif extern char ignore_irq13; extern void identify_cpu(struct cpuinfo_x86 *); extern void print_cpu_info(struct cpuinfo_x86 *); extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); /* * EFLAGS bits */ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ #define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ /* * Intel CPU features in CR4 */ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ #define 
X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ #define X86_CR4_DE 0x0008 /* enable debugging extensions */ #define X86_CR4_PSE 0x0010 /* enable page size extensions */ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ #define X86_CR4_MCE 0x0040 /* Machine check enable */ #define X86_CR4_PGE 0x0080 /* enable global pages */ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ /* * Save the cr4 feature set we're using (ie * Pentium 4MB enable and PPro Global page * enable), so that any CPU's that boot up * after us can get the correct flags. */ extern unsigned long mmu_cr4_features; static inline void set_in_cr4 (unsigned long mask) { mmu_cr4_features |= mask; __asm__("movq %%cr4,%%rax\n\t" "orq %0,%%rax\n\t" "movq %%rax,%%cr4\n" : : "irg" (mask) :"ax"); } static inline void clear_in_cr4 (unsigned long mask) { mmu_cr4_features &= ~mask; __asm__("movq %%cr4,%%rax\n\t" "andq %0,%%rax\n\t" "movq %%rax,%%cr4\n" : : "irg" (~mask) :"ax"); } /* * User space process size. 47bits minus one guard page. */ #define TASK_SIZE64 (0x800000000000UL - 4096) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. * * /proc/pid/unmap_base is only supported for 32bit processes without * 3GB personality for now. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64) #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64) /* FIXME: Used in initializer, do we need to check for 64bit here? */ #define __TASK_UNMAPPED_BASE (PAGE_ALIGN(0xffffe000 / 3)) #define TASK_UNMAPPED_32 ((current->personality & ADDR_LIMIT_3GB) ? \ PAGE_ALIGN(0xc0000000 / 3) : PAGE_ALIGN(current->map_base)) #define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE64/3) #define TASK_UNMAPPED_BASE \ (test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64) /* * Size of io_bitmap. */ #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) #define INVALID_IO_BITMAP_OFFSET 0x8000 struct i387_fxsave_struct { u16 cwd; u16 swd; u16 twd; u16 fop; u64 rip; u64 rdp; u32 mxcsr; u32 mxcsr_mask; u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */ u32 padding[24]; } __attribute__ ((aligned (16))); union i387_union { struct i387_fxsave_struct fxsave; }; struct tss_struct { u32 reserved1; u64 rsp0; u64 rsp1; u64 rsp2; u64 reserved2; u64 ist[7]; u32 reserved3; u32 reserved4; u16 reserved5; u16 io_bitmap_base; /* * The extra 1 is there because the CPU will access an * additional byte beyond the end of the IO permission * bitmap. The extra byte must be all 1 bits, and must * be within the limit. 
Thus we have: * * 128 bytes, the bitmap itself, for ports 0..0x3ff * 8 bytes, for an extra "long" of ~0UL */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; } __attribute__((packed)) ____cacheline_aligned; extern struct cpuinfo_x86 boot_cpu_data; DECLARE_PER_CPU(struct tss_struct,init_tss); /* Save the original ist values for checking stack pointers during debugging */ struct orig_ist { unsigned long ist[7]; }; DECLARE_PER_CPU(struct orig_ist, orig_ist); #ifdef CONFIG_X86_VSMP #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) #else #define ARCH_MIN_TASKALIGN 16 #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif struct thread_struct { unsigned long rsp0; unsigned long rsp; unsigned long userrsp; /* Copy from PDA */ unsigned long fs; unsigned long gs; unsigned short es, ds, fsindex, gsindex; /* Hardware debugging registers */ unsigned long debugreg0; unsigned long debugreg1; unsigned long debugreg2; unsigned long debugreg3; unsigned long debugreg6; unsigned long debugreg7; /* fault info */ unsigned long cr2, trap_no, error_code; /* floating point info */ union i387_union i387 __attribute__((aligned(16))); /* IO permissions. the bitmap could be moved into the GDT, that would make switch faster for a limited number of ioperm using tasks. -AK */ int ioperm; unsigned long *io_bitmap_ptr; unsigned io_bitmap_max; /* cached TLS descriptors. */ u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; } __attribute__((aligned(16))); #define INIT_THREAD { \ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #define INIT_TSS { \ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ } #define INIT_MMAP \ { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } #define start_thread(regs,new_rip,new_rsp) do { \ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ load_gs_index(0); \ (regs)->rip = (new_rip); \ (regs)->rsp = (new_rsp); \ write_pda(oldrsp, (new_rsp)); \ (regs)->cs = __USER_CS; \ (regs)->ss = __USER_DS; \ (regs)->eflags = 0x200; \ set_fs(USER_DS); \ } while(0) #define get_debugreg(var, register) \ __asm__("movq %%db" #register ", %0" \ :"=r" (var)) #define set_debugreg(value, register) \ __asm__("movq %0,%%db" #register \ : /* no output */ \ :"r" (value)) struct task_struct; struct mm_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); /* Prepare to copy thread state - unlazy all lazy status */ extern void prepare_to_copy(struct task_struct *tsk); /* * create a kernel thread without removing it from tasklists */ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); /* * Return saved PC of a blocked thread. * What is this good for? it will be always the scheduler or ret_from_fork. */ #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) extern unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip) #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. 
*/ struct microcode_header { unsigned int hdrver; unsigned int rev; unsigned int date; unsigned int sig; unsigned int cksum; unsigned int ldrver; unsigned int pf; unsigned int datasize; unsigned int totalsize; unsigned int reserved[3]; }; struct microcode { struct microcode_header hdr; unsigned int bits[0]; }; typedef struct microcode microcode_t; typedef struct microcode_header microcode_header_t; /* microcode format is extended from prescott processors */ struct extended_signature { unsigned int sig; unsigned int pf; unsigned int cksum; }; struct extended_sigtable { unsigned int count; unsigned int cksum; unsigned int reserved[3]; struct extended_signature sigs[0]; }; /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ #define MICROCODE_IOCFREE _IO('6',0) #define ASM_NOP1 K8_NOP1 #define ASM_NOP2 K8_NOP2 #define ASM_NOP3 K8_NOP3 #define ASM_NOP4 K8_NOP4 #define ASM_NOP5 K8_NOP5 #define ASM_NOP6 K8_NOP6 #define ASM_NOP7 K8_NOP7 #define ASM_NOP8 K8_NOP8 /* Opteron nops */ #define K8_NOP1 ".byte 0x90\n" #define K8_NOP2 ".byte 0x66,0x90\n" #define K8_NOP3 ".byte 0x66,0x66,0x90\n" #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" #define K8_NOP5 K8_NOP3 K8_NOP2 #define K8_NOP6 K8_NOP3 K8_NOP3 #define K8_NOP7 K8_NOP4 K8_NOP3 #define K8_NOP8 K8_NOP4 K8_NOP4 #define ASM_NOP_MAX 8 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ static inline void rep_nop(void) { __asm__ __volatile__("rep;nop": : :"memory"); } /* Stop speculative execution */ static inline void sync_core(void) { int tmp; asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); } #define cpu_has_fpu 1 #define ARCH_HAS_PREFETCH static inline void prefetch(void *x) { asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #define ARCH_HAS_PREFETCHW 1 static inline void prefetchw(void *x) { alternative_input("prefetcht0 (%1)", "prefetchw (%1)", X86_FEATURE_3DNOW, "r" (x)); } #define ARCH_HAS_SPINLOCK_PREFETCH 1 #define spin_lock_prefetch(x) prefetchw(x) #define cpu_relax() rep_nop() /* * NSC/Cyrix CPU configuration register indexes */ #define CX86_CCR0 0xc0 #define CX86_CCR1 0xc1 #define CX86_CCR2 0xc2 #define CX86_CCR3 0xc3 #define CX86_CCR4 0xe8 #define CX86_CCR5 0xe9 #define CX86_CCR6 0xea #define CX86_CCR7 0xeb #define CX86_DIR0 0xfe #define CX86_DIR1 0xff #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc /* * NSC/Cyrix CPU indexed register access macros */ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) #define setCx86(reg, data) do { \ outb((reg), 0x22); \ outb((data), 0x23); \ } while (0) static inline void serialize_cpu(void) { __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); } static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { /* "monitor %eax,%ecx,%edx;" */ asm volatile( ".byte 0x0f,0x01,0xc8;" : :"a" (eax), "c" (ecx), "d"(edx)); } static inline void __mwait(unsigned long eax, unsigned long ecx) { /* "mwait %eax,%ecx;" */ asm volatile( ".byte 0x0f,0x01,0xc9;" : :"a" (eax), "c" (ecx)); } #define stack_current() \ ({ \ struct thread_info *ti; \ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->task; \ }) #define cache_line_size() (boot_cpu_data.x86_cache_alignment) extern unsigned long boot_option_idle_override; /* Boot loader type from the setup header */ extern int bootloader_type; #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 #endif /* __ASM_X86_64_PROCESSOR_H */ 
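/*
 * Illustrative sketch (hypothetical function and flag, not part of the
 * original header): how the busy-wait helpers declared above are typically
 * combined.  cpu_relax() expands to "rep; nop" (the PAUSE instruction) so a
 * spinning CPU or hyperthread backs off, and prefetchw() pulls the flag's
 * cache line in for ownership before another CPU writes it:
 *
 *	static inline void example_wait_for_flag(volatile int *flag)
 *	{
 *		prefetchw((void *)flag);
 *		while (!*flag)
 *			cpu_relax();
 *	}
 */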
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/proto.h000066400000000000000000000075671314037446600244600ustar00rootroot00000000000000#ifndef _ASM_X8664_PROTO_H #define _ASM_X8664_PROTO_H 1 #include /* misc architecture specific prototypes */ struct cpuinfo_x86; struct pt_regs; extern void start_kernel(void); extern void pda_init(int); extern void zap_low_mappings(int cpu); extern void early_idt_handler(void); extern void mcheck_init(struct cpuinfo_x86 *c); #ifdef CONFIG_MTRR extern void mtrr_ap_init(void); extern void mtrr_bp_init(void); #else #define mtrr_ap_init() do {} while (0) #define mtrr_bp_init() do {} while (0) #endif extern void init_memory_mapping(unsigned long start, unsigned long end); extern void size_zones(unsigned long *z, unsigned long *h, unsigned long start_pfn, unsigned long end_pfn); extern void system_call(void); extern int kernel_syscall(void); extern void syscall_init(void); extern void ia32_syscall(void); extern void ia32_cstar_target(void); extern void ia32_sysenter_target(void); extern void config_acpi_tables(void); extern void ia32_syscall(void); extern void iommu_hole_init(void); extern int pmtimer_mark_offset(void); extern void pmtimer_resume(void); extern void pmtimer_wait(unsigned); extern unsigned int do_gettimeoffset_pm(void); #ifdef CONFIG_X86_PM_TIMER extern u32 pmtmr_ioport; #else #define pmtmr_ioport 0 #endif extern unsigned long long monotonic_base; extern int sysctl_vsyscall; extern int nohpet; extern unsigned long vxtime_hz; extern void do_softirq_thunk(void); extern int numa_setup(char *opt); extern int setup_early_printk(char *); extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); extern void early_identify_cpu(struct cpuinfo_x86 *c); extern int k8_scan_nodes(unsigned long start, unsigned long end); extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); extern unsigned long numa_free_all_bootmem(void); extern void reserve_bootmem_generic(unsigned long phys, unsigned len); extern void free_bootmem_generic(unsigned long phys, unsigned len); extern void load_gs_index(unsigned gs); extern void stop_timer_interrupt(void); extern void main_timer_handler(struct pt_regs *regs); extern unsigned long end_pfn_map; extern void show_trace(unsigned long * rsp); extern void show_registers(struct pt_regs *regs); extern void exception_table_check(void); extern void acpi_reserve_bootmem(void); extern void swap_low_mappings(void); extern void __show_regs(struct pt_regs * regs); extern void show_regs(struct pt_regs * regs); extern char *syscall32_page; extern void syscall32_cpu_init(void); extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); extern void check_ioapic(void); extern void check_efer(void); extern int unhandled_signal(struct task_struct *tsk, int sig); extern int unsynchronized_tsc(void); extern void select_idle_routine(const struct cpuinfo_x86 *c); extern void gart_parse_options(char *); extern void __init no_iommu_init(void); extern unsigned long table_start, table_end; extern int exception_trace; extern int force_iommu, no_iommu; extern int using_apic_timer; extern int disable_apic; extern unsigned cpu_khz; extern int ioapic_force; extern int skip_ioapic_setup; extern int acpi_ht; extern int acpi_disabled; #ifdef CONFIG_GART_IOMMU extern void gart_iommu_init(void); extern void iommu_hole_init(void); extern int fallback_aper_order; extern int fallback_aper_force; extern int iommu_aperture; extern int iommu_aperture_allowed; extern int 
iommu_aperture_disabled; extern int fix_aperture; #else #define iommu_aperture 0 #define iommu_aperture_allowed 0 #endif extern int force_iommu; extern int iommu_detected; extern int reboot_force; extern int notsc_setup(char *); extern int setup_additional_cpus(char *); extern void smp_local_timer_interrupt(struct pt_regs * regs); long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); #define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) #define round_down(x,y) ((x) & ~((y)-1)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ptrace.h000066400000000000000000000060201314037446600245520ustar00rootroot00000000000000#ifndef _X86_64_PTRACE_H #define _X86_64_PTRACE_H #if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) #define R15 0 #define R14 8 #define R13 16 #define R12 24 #define RBP 32 #define RBX 40 /* arguments: interrupts/non tracing syscalls only save upto here*/ #define R11 48 #define R10 56 #define R9 64 #define R8 72 #define RAX 80 #define RCX 88 #define RDX 96 #define RSI 104 #define RDI 112 #define ORIG_RAX 120 /* = ERROR */ /* end of arguments */ /* cpu exception frame or undefined in case of fast syscall. */ #define RIP 128 #define CS 136 #define EFLAGS 144 #define RSP 152 #define SS 160 #define ARGOFFSET R11 #endif /* __ASSEMBLY__ */ /* top of stack page */ #define FRAME_SIZE 168 #define PTRACE_OLDSETOPTIONS 21 #ifndef __ASSEMBLY__ struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; #endif /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. 
*/ #define PTRACE_GETREGS 12 #define PTRACE_SETREGS 13 #define PTRACE_GETFPREGS 14 #define PTRACE_SETFPREGS 15 #define PTRACE_GETFPXREGS 18 #define PTRACE_SETFPXREGS 19 /* only useful for access 32bit programs */ #define PTRACE_GET_THREAD_AREA 25 #define PTRACE_SET_THREAD_AREA 26 #define PTRACE_ARCH_PRCTL 30 /* arch_prctl for child */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) #define user_mode(regs) (!!((regs)->cs & 3)) #define user_mode_vm(regs) user_mode(regs) #define instruction_pointer(regs) ((regs)->rip) extern unsigned long profile_pc(struct pt_regs *regs); void signal_fault(struct pt_regs *regs, void __user *frame, char *where); struct task_struct; extern unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); enum { EF_CF = 0x00000001, EF_PF = 0x00000004, EF_AF = 0x00000010, EF_ZF = 0x00000040, EF_SF = 0x00000080, EF_TF = 0x00000100, EF_IE = 0x00000200, EF_DF = 0x00000400, EF_OF = 0x00000800, EF_IOPL = 0x00003000, EF_IOPL_RING0 = 0x00000000, EF_IOPL_RING1 = 0x00001000, EF_IOPL_RING2 = 0x00002000, EF_NT = 0x00004000, /* nested task */ EF_RF = 0x00010000, /* resume */ EF_VM = 0x00020000, /* virtual mode */ EF_AC = 0x00040000, /* alignment */ EF_VIF = 0x00080000, /* virtual interrupt */ EF_VIP = 0x00100000, /* virtual interrupt pending */ EF_ID = 0x00200000, /* id */ }; #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/resource.h000066400000000000000000000001371314037446600251260ustar00rootroot00000000000000#ifndef _X8664_RESOURCE_H #define _X8664_RESOURCE_H #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/rio.h000066400000000000000000000057331314037446600240770ustar00rootroot00000000000000/* * Derived from include/asm-i386/mach-summit/mach_mpparse.h * and include/asm-i386/mach-default/bios_ebda.h * * Author: Laurent Vivier */ #ifndef __ASM_RIO_H #define __ASM_RIO_H #define RIO_TABLE_VERSION 3 struct rio_table_hdr { u8 version; /* Version number of this data structure */ u8 num_scal_dev; /* # of Scalability devices */ u8 num_rio_dev; /* # of RIO I/O devices */ } __attribute__((packed)); struct scal_detail { u8 node_id; /* Scalability Node ID */ u32 CBAR; /* Address of 1MB register space */ u8 port0node; /* Node ID port connected to: 0xFF=None */ u8 port0port; /* Port num port connected to: 0,1,2, or */ /* 0xFF=None */ u8 port1node; /* Node ID port connected to: 0xFF = None */ u8 port1port; /* Port num port connected to: 0,1,2, or */ /* 0xFF=None */ u8 port2node; /* Node ID port connected to: 0xFF = None */ u8 port2port; /* Port num port connected to: 0,1,2, or */ /* 0xFF=None */ u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ } __attribute__((packed)); struct rio_detail { u8 node_id; /* RIO Node ID */ u32 BBAR; /* Address of 1MB register space */ u8 type; /* Type of device */ u8 owner_id; /* Node ID of Hurricane that owns this */ /* node */ u8 port0node; /* Node ID port connected to: 0xFF=None */ u8 port0port; /* Port num port connected to: 0,1,2, or */ /* 0xFF=None */ u8 port1node; /* Node ID port connected to: 0xFF=None */ u8 port1port; /* Port num port connected to: 0,1,2, or */ /* 0xFF=None */ u8 first_slot; /* Lowest slot number below this Calgary */ u8 status; /* Bit 0 = 1 : the XAPIC is used */ /* = 0 : the XAPIC is not used, ie: */ /* ints fwded to another XAPIC */ /* Bits1:7 Reserved */ u8 WP_index; /* instance index - lower ones have */ /* lower slot numbers/PCI bus numbers */ u8 chassis_num; /* 1 based Chassis number */ } __attribute__((packed)); enum { 
HURR_SCALABILTY = 0, /* Hurricane Scalability info */ HURR_RIOIB = 2, /* Hurricane RIOIB info */ COMPAT_CALGARY = 4, /* Compatibility Calgary */ ALT_CALGARY = 5, /* Second Planar Calgary */ }; /* * there is a real-mode segmented pointer pointing to the * 4K EBDA area at 0x40E. */ static inline unsigned long get_bios_ebda(void) { unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL); address <<= 4; return address; } #endif /* __ASM_RIO_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/rtc.h000066400000000000000000000002161314037446600240650ustar00rootroot00000000000000#ifndef _X86_64_RTC_H #define _X86_64_RTC_H /* * x86 uses the default access methods for the RTC. */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/rwlock.h000066400000000000000000000045641314037446600246100ustar00rootroot00000000000000/* include/asm-x86_64/rwlock.h * * Helpers used by both rw spinlocks and rw semaphores. * * Based in part on code from semaphore.h and * spinlock.h Copyright 1996 Linus Torvalds. * * Copyright 1999 Red Hat, Inc. * Copyright 2001,2002 SuSE labs * * Written by Benjamin LaHaise. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _ASM_X86_64_RWLOCK_H #define _ASM_X86_64_RWLOCK_H #include #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" #define __build_read_lock_ptr(rw, helper) \ asm volatile(LOCK "subl $1,(%0)\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ "2:\tcall " helper "\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END \ ::"a" (rw) : "memory") #define __build_read_lock_const(rw, helper) \ asm volatile(LOCK "subl $1,%0\n\t" \ "js 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ "2:\tpushq %%rax\n\t" \ "leaq %0,%%rax\n\t" \ "call " helper "\n\t" \ "popq %%rax\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END \ :"=m" (*((volatile int *)rw))::"memory") #define __build_read_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_read_lock_const(rw, helper); \ else \ __build_read_lock_ptr(rw, helper); \ } while (0) #define __build_write_lock_ptr(rw, helper) \ asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ "2:\tcall " helper "\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END \ ::"a" (rw) : "memory") #define __build_write_lock_const(rw, helper) \ asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ "jnz 2f\n" \ "1:\n" \ LOCK_SECTION_START("") \ "2:\tpushq %%rax\n\t" \ "leaq %0,%%rax\n\t" \ "call " helper "\n\t" \ "popq %%rax\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END \ :"=m" (*((volatile long *)rw))::"memory") #define __build_write_lock(rw, helper) do { \ if (__builtin_constant_p(rw)) \ __build_write_lock_const(rw, helper); \ else \ __build_write_lock_ptr(rw, helper); \ } while (0) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/scatterlist.h000066400000000000000000000011201314037446600256310ustar00rootroot00000000000000#ifndef _X8664_SCATTERLIST_H #define _X8664_SCATTERLIST_H struct scatterlist { struct page *page; unsigned int offset; unsigned int length; dma_addr_t dma_address; unsigned int dma_length; }; #define ISA_DMA_THRESHOLD (0x00ffffff) /* These macros should be used after a pci_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg * returns. 
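 *
 * Illustrative driver-side sketch (hypothetical names, not from this file):
 *
 *	int i, nents = pci_map_sg(pdev, sglist, count, PCI_DMA_TODEVICE);
 *	for (i = 0; i < nents; i++) {
 *		dma_addr_t addr = sg_dma_address(&sglist[i]);
 *		unsigned int len = sg_dma_len(&sglist[i]);
 *		... program addr/len into the device's DMA engine ...
 *	}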
*/ #define sg_dma_address(sg) ((sg)->dma_address) #define sg_dma_len(sg) ((sg)->dma_length) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/seccomp.h000066400000000000000000000011131314037446600247230ustar00rootroot00000000000000#ifndef _ASM_SECCOMP_H #include #ifdef TIF_32BIT #error "unexpected TIF_32BIT on x86_64" #else #define TIF_32BIT TIF_IA32 #endif #include #include #define __NR_seccomp_read __NR_read #define __NR_seccomp_write __NR_write #define __NR_seccomp_exit __NR_exit #define __NR_seccomp_sigreturn __NR_rt_sigreturn #define __NR_seccomp_read_32 __NR_ia32_read #define __NR_seccomp_write_32 __NR_ia32_write #define __NR_seccomp_exit_32 __NR_ia32_exit #define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn #endif /* _ASM_SECCOMP_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sections.h000066400000000000000000000002001314037446600251150ustar00rootroot00000000000000#ifndef _X8664_SECTIONS_H #define _X8664_SECTIONS_H /* nothing to see, move along */ #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/segment.h000066400000000000000000000021771314037446600247470ustar00rootroot00000000000000#ifndef _ASM_SEGMENT_H #define _ASM_SEGMENT_H #include #define __KERNEL_CS 0x10 #define __KERNEL_DS 0x18 #define __KERNEL32_CS 0x38 /* * we cannot use the same code segment descriptor for user and kernel * -- not even in the long flat mode, because of different DPL /kkeil * The segment offset needs to contain a RPL. Grr. -AK * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) */ #define __USER32_CS 0x23 /* 4*8+3 */ #define __USER_DS 0x2b /* 5*8+3 */ #define __USER_CS 0x33 /* 6*8+3 */ #define __USER32_DS __USER_DS #define GDT_ENTRY_TLS 1 #define GDT_ENTRY_TSS 8 /* needs two entries */ #define GDT_ENTRY_LDT 10 /* needs two entries */ #define GDT_ENTRY_TLS_MIN 12 #define GDT_ENTRY_TLS_MAX 14 /* 15 free */ #define GDT_ENTRY_TLS_ENTRIES 3 /* TLS indexes for 64bit - hardcoded in arch_prctl */ #define FS_TLS 0 #define GS_TLS 1 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) #define IDT_ENTRIES 256 #define GDT_ENTRIES 16 #define GDT_SIZE (GDT_ENTRIES * 8) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/semaphore.h000066400000000000000000000123611314037446600252640ustar00rootroot00000000000000#ifndef _X86_64_SEMAPHORE_H #define _X86_64_SEMAPHORE_H #include #ifdef __KERNEL__ /* * SMP- and interrupt-safe semaphores.. * * (C) Copyright 1996 Linus Torvalds * * Modified 1996-12-23 by Dave Grothe to fix bugs in * the original code and to make semaphore waits * interruptible so that processes waiting on * semaphores can be killed. * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper * functions in asm/sempahore-helper.h while fixing a * potential and subtle race discovered by Ulrich Schmid * in down_interruptible(). Since I started to play here I * also implemented the `trylock' semaphore operation. * 1999-07-02 Artur Skawina * Optimized "0(ecx)" -> "(ecx)" (the assembler does not * do this). Changed calling sequences from push/jmp to * traditional call/ret. * Modified 2001-01-01 Andreas Franck * Some hacks to ensure compatibility with recent * GCC snapshots, to avoid stack corruption when compiling * with -fomit-frame-pointer. It's not sure if this will * be fixed in GCC, as our previous implementation was a * bit dubious. 
* * If you would like to see an analysis of this implementation, please * ftp to gcom.com and download the file * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. * */ #include #include #include #include #include #include struct semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; }; #define __SEMAPHORE_INITIALIZER(name, n) \ { \ .count = ATOMIC_INIT(n), \ .sleepers = 0, \ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ } #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) static inline void sema_init (struct semaphore *sem, int val) { /* * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * * i'd rather use the more flexible initialization above, but sadly * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. */ atomic_set(&sem->count, val); sem->sleepers = 0; init_waitqueue_head(&sem->wait); } static inline void init_MUTEX (struct semaphore *sem) { sema_init(sem, 1); } static inline void init_MUTEX_LOCKED (struct semaphore *sem) { sema_init(sem, 0); } asmlinkage void __down_failed(void /* special register calling convention */); asmlinkage int __down_failed_interruptible(void /* params in registers */); asmlinkage int __down_failed_trylock(void /* params in registers */); asmlinkage void __up_wakeup(void /* special register calling convention */); asmlinkage void __down(struct semaphore * sem); asmlinkage int __down_interruptible(struct semaphore * sem); asmlinkage int __down_trylock(struct semaphore * sem); asmlinkage void __up(struct semaphore * sem); /* * This is ugly, but we want the default case to fall through. * "__down_failed" is a special asm handler that calls the C * routine that actually waits. See arch/x86_64/kernel/semaphore.c */ static inline void down(struct semaphore * sem) { might_sleep(); __asm__ __volatile__( "# atomic down operation\n\t" LOCK "decl %0\n\t" /* --sem->count */ "js 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tcall __down_failed\n\t" "jmp 1b\n" LOCK_SECTION_END :"=m" (sem->count) :"D" (sem) :"memory"); } /* * Interruptible try to acquire a semaphore. If we obtained * it, return zero. If we were interrupted, returns -EINTR */ static inline int down_interruptible(struct semaphore * sem) { int result; might_sleep(); __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tcall __down_failed_interruptible\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "=m" (sem->count) :"D" (sem) :"memory"); return result; } /* * Non-blockingly attempt to down() a semaphore. * Returns zero if we acquired it */ static inline int down_trylock(struct semaphore * sem) { int result; __asm__ __volatile__( "# atomic interruptible down operation\n\t" LOCK "decl %1\n\t" /* --sem->count */ "js 2f\n\t" "xorl %0,%0\n" "1:\n" LOCK_SECTION_START("") "2:\tcall __down_failed_trylock\n\t" "jmp 1b\n" LOCK_SECTION_END :"=a" (result), "=m" (sem->count) :"D" (sem) :"memory","cc"); return result; } /* * Note! This is subtle. We jump to wake people up only if * the semaphore was negative (== somebody was waiting on it). * The default case (no contention) will result in NO * jumps for both down() and up(). 
*/ static inline void up(struct semaphore * sem) { __asm__ __volatile__( "# atomic up operation\n\t" LOCK "incl %0\n\t" /* ++sem->count */ "jle 2f\n" "1:\n" LOCK_SECTION_START("") "2:\tcall __up_wakeup\n\t" "jmp 1b\n" LOCK_SECTION_END :"=m" (sem->count) :"D" (sem) :"memory"); } #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sembuf.h000066400000000000000000000013011314037446600245520ustar00rootroot00000000000000#ifndef _X86_64_SEMBUF_H #define _X86_64_SEMBUF_H /* * The semid64_ds structure for x86_64 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values */ struct semid64_ds { struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ __kernel_time_t sem_otime; /* last semop time */ unsigned long __unused1; __kernel_time_t sem_ctime; /* last change time */ unsigned long __unused2; unsigned long sem_nsems; /* no. of semaphores in array */ unsigned long __unused3; unsigned long __unused4; }; #endif /* _X86_64_SEMBUF_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/serial.h000066400000000000000000000020111314037446600245470ustar00rootroot00000000000000/* * include/asm-x86_64/serial.h */ #include /* * This assumes you have a 1.8432 MHz clock for your UART. * * It'd be nice if someone built a serial card with a 24.576 MHz * clock, since the 16550A is capable of handling a top speed of 1.5 * megabits/second; but this requires the faster clock. */ #define BASE_BAUD ( 1843200 / 16 ) /* Standard COM flags (except for COM4, because of the 8514 problem) */ #ifdef CONFIG_SERIAL_DETECT_IRQ #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) #else #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF #endif #define SERIAL_PORT_DFNS \ /* UART CLK PORT IRQ FLAGS */ \ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/setup.h000066400000000000000000000001261314037446600244350ustar00rootroot00000000000000#ifndef _x8664_SETUP_H #define _x8664_SETUP_H #define COMMAND_LINE_SIZE 2048 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/shmbuf.h000066400000000000000000000017771314037446600245760ustar00rootroot00000000000000#ifndef _X8664_SHMBUF_H #define _X8664_SHMBUF_H /* * The shmid64_ds structure for x8664 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * * Pad space is left for: * - 2 miscellaneous 64-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ size_t shm_segsz; /* size of segment (bytes) */ __kernel_time_t shm_atime; /* last attach time */ __kernel_time_t shm_dtime; /* last detach time */ __kernel_time_t shm_ctime; /* last change time */ __kernel_pid_t shm_cpid; /* pid of creator */ __kernel_pid_t shm_lpid; /* pid of last operator */ unsigned long shm_nattch; /* no. 
of current attaches */ unsigned long __unused4; unsigned long __unused5; }; struct shminfo64 { unsigned long shmmax; unsigned long shmmin; unsigned long shmmni; unsigned long shmseg; unsigned long shmall; unsigned long __unused1; unsigned long __unused2; unsigned long __unused3; unsigned long __unused4; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/shmparam.h000066400000000000000000000002361314037446600251070ustar00rootroot00000000000000#ifndef _ASMX8664_SHMPARAM_H #define _ASMX8664_SHMPARAM_H #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #endif /* _ASMX8664_SHMPARAM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sigcontext.h000066400000000000000000000023421314037446600254660ustar00rootroot00000000000000#ifndef _ASM_X86_64_SIGCONTEXT_H #define _ASM_X86_64_SIGCONTEXT_H #include #include /* FXSAVE frame */ /* Note: reserved1/2 may someday contain valuable data. Always save/restore them when you change signal frames. */ struct _fpstate { __u16 cwd; __u16 swd; __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ __u16 fop; __u64 rip; __u64 rdp; __u32 mxcsr; __u32 mxcsr_mask; __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ __u32 reserved2[24]; }; struct sigcontext { unsigned long r8; unsigned long r9; unsigned long r10; unsigned long r11; unsigned long r12; unsigned long r13; unsigned long r14; unsigned long r15; unsigned long rdi; unsigned long rsi; unsigned long rbp; unsigned long rbx; unsigned long rdx; unsigned long rax; unsigned long rcx; unsigned long rsp; unsigned long rip; unsigned long eflags; /* RFLAGS */ unsigned short cs; unsigned short gs; unsigned short fs; unsigned short __pad0; unsigned long err; unsigned long trapno; unsigned long oldmask; unsigned long cr2; struct _fpstate __user *fpstate; /* zero when no FPU context */ unsigned long reserved1[8]; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sigcontext32.h000066400000000000000000000030261314037446600256330ustar00rootroot00000000000000#ifndef _SIGCONTEXT32_H #define _SIGCONTEXT32_H 1 /* signal context for 32bit programs. 
*/ #define X86_FXSR_MAGIC 0x0000 struct _fpreg { unsigned short significand[4]; unsigned short exponent; }; struct _fpxreg { unsigned short significand[4]; unsigned short exponent; unsigned short padding[3]; }; struct _xmmreg { __u32 element[4]; }; /* FSAVE frame with extensions */ struct _fpstate_ia32 { /* Regular FPU environment */ __u32 cw; __u32 sw; __u32 tag; /* not compatible to 64bit twd */ __u32 ipoff; __u32 cssel; __u32 dataoff; __u32 datasel; struct _fpreg _st[8]; unsigned short status; unsigned short magic; /* 0xffff = regular FPU data only */ /* FXSR FPU environment */ __u32 _fxsr_env[6]; __u32 mxcsr; __u32 reserved; struct _fpxreg _fxsr_st[8]; struct _xmmreg _xmm[8]; /* It's actually 16 */ __u32 padding[56]; }; struct sigcontext_ia32 { unsigned short gs, __gsh; unsigned short fs, __fsh; unsigned short es, __esh; unsigned short ds, __dsh; unsigned int edi; unsigned int esi; unsigned int ebp; unsigned int esp; unsigned int ebx; unsigned int edx; unsigned int ecx; unsigned int eax; unsigned int trapno; unsigned int err; unsigned int eip; unsigned short cs, __csh; unsigned int eflags; unsigned int esp_at_signal; unsigned short ss, __ssh; unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ unsigned int oldmask; unsigned int cr2; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/siginfo.h000066400000000000000000000002171314037446600247340ustar00rootroot00000000000000#ifndef _X8664_SIGINFO_H #define _X8664_SIGINFO_H #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/signal.h000066400000000000000000000100171314037446600245520ustar00rootroot00000000000000#ifndef _ASMx8664_SIGNAL_H #define _ASMx8664_SIGNAL_H #ifndef __ASSEMBLY__ #include #include #include /* Avoid too many header ordering problems. */ struct siginfo; #ifdef __KERNEL__ /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ #define _NSIG 64 #define _NSIG_BPW 64 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) typedef unsigned long old_sigset_t; /* at least 32 bits */ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; struct pt_regs; asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); #else /* Here we must cater to libcs that poke about in kernel headers. */ #define NSIG 32 typedef unsigned long sigset_t; #endif /* __KERNEL__ */ #endif #define SIGHUP 1 #define SIGINT 2 #define SIGQUIT 3 #define SIGILL 4 #define SIGTRAP 5 #define SIGABRT 6 #define SIGIOT 6 #define SIGBUS 7 #define SIGFPE 8 #define SIGKILL 9 #define SIGUSR1 10 #define SIGSEGV 11 #define SIGUSR2 12 #define SIGPIPE 13 #define SIGALRM 14 #define SIGTERM 15 #define SIGSTKFLT 16 #define SIGCHLD 17 #define SIGCONT 18 #define SIGSTOP 19 #define SIGTSTP 20 #define SIGTTIN 21 #define SIGTTOU 22 #define SIGURG 23 #define SIGXCPU 24 #define SIGXFSZ 25 #define SIGVTALRM 26 #define SIGPROF 27 #define SIGWINCH 28 #define SIGIO 29 #define SIGPOLL SIGIO /* #define SIGLOST 29 */ #define SIGPWR 30 #define SIGSYS 31 #define SIGUNUSED 31 /* These should not be considered constants from userland. */ #define SIGRTMIN 32 #define SIGRTMAX _NSIG /* * SA_FLAGS values: * * SA_ONSTACK indicates that a registered stack_t will be used. * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the * SA_RESTART flag to get restarting signals (which were the default long ago) * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. * SA_RESETHAND clears the handler when the signal is delivered. 
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. * SA_NODEFER prevents the current signal from being masked in the handler. * * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single * Unix names RESETHAND and NODEFER respectively. */ #define SA_NOCLDSTOP 0x00000001 #define SA_NOCLDWAIT 0x00000002 #define SA_SIGINFO 0x00000004 #define SA_ONSTACK 0x08000000 #define SA_RESTART 0x10000000 #define SA_NODEFER 0x40000000 #define SA_RESETHAND 0x80000000 #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND #define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ #define SA_RESTORER 0x04000000 /* * sigaltstack controls */ #define SS_ONSTACK 1 #define SS_DISABLE 2 #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 #include #ifndef __ASSEMBLY__ struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; struct k_sigaction { struct sigaction sa; }; typedef struct sigaltstack { void __user *ss_sp; int ss_flags; size_t ss_size; } stack_t; #ifdef __KERNEL__ #include #undef __HAVE_ARCH_SIG_BITOPS #if 0 static inline void sigaddset(sigset_t *set, int _sig) { __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); } static inline void sigdelset(sigset_t *set, int _sig) { __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); } static inline int __const_sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1))); } static inline int __gen_sigismember(sigset_t *set, int _sig) { int ret; __asm__("btq %2,%1\n\tsbbq %0,%0" : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); return ret; } #define sigismember(set,sig) \ (__builtin_constant_p(sig) ? \ __const_sigismember((set),(sig)) : \ __gen_sigismember((set),(sig))) static inline int sigfindinword(unsigned long word) { __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc"); return word; } #endif #endif #define ptrace_signal_deliver(regs, cookie) do { } while (0) #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/smp.h000066400000000000000000000064351314037446600241050ustar00rootroot00000000000000#ifndef __ASM_SMP_H #define __ASM_SMP_H /* * We need the APIC definitions automatically as part of 'smp.h' */ #ifndef __ASSEMBLY__ #include #include #include #include extern int disable_apic; #endif #ifdef CONFIG_X86_LOCAL_APIC #ifndef __ASSEMBLY__ #include #include #ifdef CONFIG_X86_IO_APIC #include #endif #include #include #endif #endif #ifdef CONFIG_SMP #ifndef ASSEMBLY #include struct pt_regs; extern cpumask_t cpu_present_mask; extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; /* * Private routines/data */ extern void smp_alloc_memory(void); extern volatile unsigned long smp_invalidate_needed; extern int pic_mode; extern void lock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void); extern int smp_num_siblings; extern void smp_send_reschedule(int cpu); void smp_stop_cpu(void); extern int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, int retry, int wait); extern cpumask_t cpu_sibling_map[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; extern u8 phys_proc_id[NR_CPUS]; extern u8 cpu_core_id[NR_CPUS]; extern u8 cpu_llc_id[NR_CPUS]; #define SMP_TRAMPOLINE_BASE 0x6000 /* * On x86 all CPUs are mapped 1:1 to the APIC space. * This simplifies scheduling and IPI sending and * compresses data structures. 
*/ static inline int num_booting_cpus(void) { return cpus_weight(cpu_callout_map); } #define raw_smp_processor_id() read_pda(cpunumber) static inline int hard_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); } extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); extern void prefill_possible_map(void); extern unsigned num_processors; extern unsigned disabled_cpus; #endif /* !ASSEMBLY */ #define NO_PROC_ID 0xFF /* No processor magic marker */ #endif #ifndef ASSEMBLY /* * Some lowlevel functions might want to know about * the real APIC ID <-> CPU # mapping. */ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ extern u8 x86_cpu_to_log_apicid[NR_CPUS]; extern u8 bios_cpu_apicid[]; static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) { return cpus_addr(cpumask)[0]; } static inline int cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < NR_CPUS) return (int)bios_cpu_apicid[mps_cpu]; else return BAD_APICID; } #endif /* !ASSEMBLY */ #ifndef CONFIG_SMP #define stack_smp_processor_id() 0 #define safe_smp_processor_id() 0 #define cpu_logical_map(x) (x) #else #include #define stack_smp_processor_id() \ ({ \ struct thread_info *ti; \ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ ti->cpu; \ }) #endif #ifndef __ASSEMBLY__ static __inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); } #endif #ifdef CONFIG_SMP #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #else #define cpu_physical_id(cpu) boot_cpu_id #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/socket.h000066400000000000000000000021671314037446600245740ustar00rootroot00000000000000#ifndef _ASM_SOCKET_H #define _ASM_SOCKET_H #include /* For setsockopt(2) */ #define SOL_SOCKET 1 #define SO_DEBUG 1 #define SO_REUSEADDR 2 #define SO_TYPE 3 #define SO_ERROR 4 #define SO_DONTROUTE 5 #define SO_BROADCAST 6 #define SO_SNDBUF 7 #define SO_RCVBUF 8 #define SO_SNDBUFFORCE 32 #define SO_RCVBUFFORCE 33 #define SO_KEEPALIVE 9 #define SO_OOBINLINE 10 #define SO_NO_CHECK 11 #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 /* To add :#define SO_REUSEPORT 15 */ #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 #define SO_SNDLOWAT 19 #define SO_RCVTIMEO 20 #define SO_SNDTIMEO 21 /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 22 #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 #define SO_SECURITY_ENCRYPTION_NETWORK 24 #define SO_BINDTODEVICE 25 /* Socket filtering */ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 #define SCM_TIMESTAMP SO_TIMESTAMP #define SO_ACCEPTCONN 30 #define SO_PEERSEC 31 #endif /* _ASM_SOCKET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sockios.h000066400000000000000000000004271314037446600247530ustar00rootroot00000000000000#ifndef __ARCH_X8664_SOCKIOS__ #define __ARCH_X8664_SOCKIOS__ /* Socket-level I/O control calls. 
*/ #define FIOSETOWN 0x8901 #define SIOCSPGRP 0x8902 #define FIOGETOWN 0x8903 #define SIOCGPGRP 0x8904 #define SIOCATMARK 0x8905 #define SIOCGSTAMP 0x8906 /* Get stamp */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/sparsemem.h000066400000000000000000000012461314037446600252750ustar00rootroot00000000000000#ifndef _ASM_X86_64_SPARSEMEM_H #define _ASM_X86_64_SPARSEMEM_H 1 #ifdef CONFIG_SPARSEMEM /* * generic non-linear memory support: * * 1) we will not split memory into more chunks than will fit into the flags * field of the struct page * * SECTION_SIZE_BITS 2^n: size of each section * MAX_PHYSADDR_BITS 2^n: max size of physical address space * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space * */ #define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ #define MAX_PHYSADDR_BITS 40 #define MAX_PHYSMEM_BITS 40 extern int early_pfn_to_nid(unsigned long pfn); #endif /* CONFIG_SPARSEMEM */ #endif /* _ASM_X86_64_SPARSEMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/spinlock.h000066400000000000000000000063071314037446600251260ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H #include #include #include #include /* * Your basic SMP spinlocks, allowing only a single CPU anywhere * * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * * We make no fairness assumptions. They have a cost. * * (the type definitions are in asm/spinlock_types.h) */ #define __raw_spin_is_locked(x) \ (*(volatile signed int *)(&(x)->slock) <= 0) #define __raw_spin_lock_string \ "\n1:\t" \ "lock ; decl %0\n\t" \ "js 2f\n" \ LOCK_SECTION_START("") \ "2:\t" \ "rep;nop\n\t" \ "cmpl $0,%0\n\t" \ "jle 2b\n\t" \ "jmp 1b\n" \ LOCK_SECTION_END #define __raw_spin_unlock_string \ "movl $1,%0" \ :"=m" (lock->slock) : : "memory" static inline void __raw_spin_lock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_lock_string :"=m" (lock->slock) : : "memory"); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) static inline int __raw_spin_trylock(raw_spinlock_t *lock) { int oldval; __asm__ __volatile__( "xchgl %0,%1" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); return oldval > 0; } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__( __raw_spin_unlock_string ); } #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) /* * Read-write spinlocks, allowing multiple readers * but only one writer. * * NOTE! it is quite common to have readers in interrupts * but no interrupt writers. For those circumstances we * can "mix" irq-safe locks - any writer needs to get a * irq-safe write-lock, but readers can get non-irqsafe * read-locks. * * On x86, we implement read-write locks as a 32-bit counter * with the high bit (sign) being the "contended" bit. * * The inline assembly is non-obvious. Think about it. * * Changed to use the same technique as rw semaphores. See * semaphore.h for details. 
-ben * * the helpers are in arch/i386/kernel/semaphore.c */ #define __raw_read_can_lock(x) ((int)(x)->lock > 0) #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) static inline void __raw_read_lock(raw_rwlock_t *rw) { __build_read_lock(rw, "__read_lock_failed"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { __build_write_lock(rw, "__write_lock_failed"); } static inline int __raw_read_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; atomic_dec(count); if (atomic_read(count) >= 0) return 1; atomic_inc(count); return 0; } static inline int __raw_write_trylock(raw_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) return 1; atomic_add(RW_LOCK_BIAS, count); return 0; } static inline void __raw_read_unlock(raw_rwlock_t *rw) { asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" : "=m" (rw->lock) : : "memory"); } #define _raw_spin_relax(lock) cpu_relax() #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/spinlock_types.h000066400000000000000000000005741314037446600263520ustar00rootroot00000000000000#ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H #ifndef __LINUX_SPINLOCK_TYPES_H # error "please don't include this file directly" #endif typedef struct { volatile unsigned int slock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; } raw_rwlock_t; #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/stat.h000066400000000000000000000016101314037446600242470ustar00rootroot00000000000000#ifndef _ASM_X86_64_STAT_H #define _ASM_X86_64_STAT_H #define STAT_HAVE_NSEC 1 struct stat { unsigned long st_dev; unsigned long st_ino; unsigned long st_nlink; unsigned int st_mode; unsigned int st_uid; unsigned int st_gid; unsigned int __pad0; unsigned long st_rdev; long st_size; long st_blksize; long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; long __unused[3]; }; /* For 32bit emulation */ struct __old_kernel_stat { unsigned short st_dev; unsigned short st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned short st_rdev; unsigned int st_size; unsigned int st_atime; unsigned int st_mtime; unsigned int st_ctime; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/statfs.h000066400000000000000000000016041314037446600246030ustar00rootroot00000000000000#ifndef _X86_64_STATFS_H #define _X86_64_STATFS_H #ifndef __KERNEL_STRICT_NAMES #include typedef __kernel_fsid_t fsid_t; #endif /* * This is ugly -- we're already 64-bit clean, so just duplicate the * definitions. 
*/ struct statfs { long f_type; long f_bsize; long f_blocks; long f_bfree; long f_bavail; long f_files; long f_ffree; __kernel_fsid_t f_fsid; long f_namelen; long f_frsize; long f_spare[5]; }; struct statfs64 { long f_type; long f_bsize; long f_blocks; long f_bfree; long f_bavail; long f_files; long f_ffree; __kernel_fsid_t f_fsid; long f_namelen; long f_frsize; long f_spare[5]; }; struct compat_statfs64 { __u32 f_type; __u32 f_bsize; __u64 f_blocks; __u64 f_bfree; __u64 f_bavail; __u64 f_files; __u64 f_ffree; __kernel_fsid_t f_fsid; __u32 f_namelen; __u32 f_frsize; __u32 f_spare[5]; } __attribute__((packed)); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/string.h000066400000000000000000000031731314037446600246100ustar00rootroot00000000000000#ifndef _X86_64_STRING_H_ #define _X86_64_STRING_H_ #ifdef __KERNEL__ /* Written 2002 by Andi Kleen */ /* Only used for special circumstances. Stolen from i386/string.h */ static inline void * __inline_memcpy(void * to, const void * from, size_t n) { unsigned long d0, d1, d2; __asm__ __volatile__( "rep ; movsl\n\t" "testb $2,%b4\n\t" "je 1f\n\t" "movsw\n" "1:\ttestb $1,%b4\n\t" "je 2f\n\t" "movsb\n" "2:" : "=&c" (d0), "=&D" (d1), "=&S" (d2) :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) : "memory"); return (to); } /* Even with __builtin_ the compiler may decide to use the out of line function. */ #define __HAVE_ARCH_MEMCPY 1 extern void *__memcpy(void *to, const void *from, size_t len); #define memcpy(dst,src,len) \ ({ size_t __len = (len); \ void *__ret; \ if (__builtin_constant_p(len) && __len >= 64) \ __ret = __memcpy((dst),(src),__len); \ else \ __ret = __builtin_memcpy((dst),(src),__len); \ __ret; }) #define __HAVE_ARCH_MEMSET #define memset __builtin_memset #define __HAVE_ARCH_MEMMOVE void * memmove(void * dest,const void *src,size_t count); /* Use C out of line version for memcmp */ #define memcmp __builtin_memcmp int memcmp(const void * cs,const void * ct,size_t count); /* out of line string functions use always C versions */ #define strlen __builtin_strlen size_t strlen(const char * s); #define strcpy __builtin_strcpy char * strcpy(char * dest,const char *src); #define strcat __builtin_strcat char * strcat(char * dest, const char * src); #define strcmp __builtin_strcmp int strcmp(const char * cs,const char * ct); #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/suspend.h000066400000000000000000000033441314037446600247630ustar00rootroot00000000000000/* * Copyright 2001-2003 Pavel Machek * Based on code * Copyright 2001 Patrick Mochel */ #include #include static inline int arch_prepare_suspend(void) { return 0; } /* Image of the saved processor state. If you touch this, fix acpi_wakeup.S. 
*/ struct saved_context { u16 ds, es, fs, gs, ss; unsigned long gs_base, gs_kernel_base, fs_base; unsigned long cr0, cr2, cr3, cr4, cr8; u16 gdt_pad; u16 gdt_limit; unsigned long gdt_base; u16 idt_pad; u16 idt_limit; unsigned long idt_base; u16 ldt; u16 tss; unsigned long tr; unsigned long safety; unsigned long return_address; unsigned long eflags; } __attribute__((packed)); /* We'll access these from assembly, so we'd better have them outside struct */ extern unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx; extern unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi; extern unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11; extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15; extern unsigned long saved_context_eflags; #define loaddebug(thread,register) \ __asm__("movq %0,%%db" #register \ : /* no output */ \ :"r" ((thread)->debugreg##register)) extern void fix_processor_context(void); #ifdef CONFIG_ACPI_SLEEP extern unsigned long saved_eip; extern unsigned long saved_esp; extern unsigned long saved_ebp; extern unsigned long saved_ebx; extern unsigned long saved_esi; extern unsigned long saved_edi; /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/swiotlb.h000066400000000000000000000043771314037446600247740ustar00rootroot00000000000000#ifndef _ASM_SWIOTLB_H #define _ASM_SWTIOLB_H 1 #include #include /* SWIOTLB interface */ extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir); extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, size_t size, int dir); extern void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, int dir); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); extern void swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); extern void swiotlb_init(void); #ifdef CONFIG_XEN extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction); extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction); #endif #ifdef CONFIG_SWIOTLB extern int swiotlb; #else #define swiotlb 0 #endif extern void pci_swiotlb_init(void); #endif /* _ASM_SWTIOLB_H */ 
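/*
 * Editor's usage sketch (not part of the original SLES10 tree): the SWIOTLB
 * interface declared in swiotlb.h above is normally reached through the
 * generic DMA API, but the exported entry points can be called directly as
 * shown here. The device pointer, buffer and length are hypothetical; only
 * the swiotlb_* calls and their signatures are taken from the header above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/swiotlb.h>

static int demo_swiotlb_map(struct device *hwdev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* Map the buffer for a device read; this may bounce it through the SWIOTLB. */
	bus = swiotlb_map_single(hwdev, buf, len, DMA_TO_DEVICE);
	if (swiotlb_dma_mapping_error(bus))
		return -ENOMEM;

	/* ... hand 'bus' to the device and wait for the transfer to finish ... */

	swiotlb_unmap_single(hwdev, bus, len, DMA_TO_DEVICE);
	return 0;
}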
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/system.h000066400000000000000000000272371314037446600246350ustar00rootroot00000000000000#ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H #include #include #include #ifdef __KERNEL__ #ifdef CONFIG_SMP #define LOCK_PREFIX "lock ; " #else #define LOCK_PREFIX "" #endif #define __STR(x) #x #define STR(x) __STR(x) #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" /* frame pointer must be last for get_wchan */ #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t" #define __EXTRA_CLOBBER \ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" /* Save restore flags to clear handle leaking NT */ #define switch_to(prev,next,last) \ asm volatile(SAVE_CONTEXT \ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ "call __switch_to\n\t" \ ".globl thread_return\n" \ "thread_return:\n\t" \ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ "movq %P[thread_info](%%rsi),%%r8\n\t" \ LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ "movq %%rax,%%rdi\n\t" \ "jc ret_from_fork\n\t" \ RESTORE_CONTEXT \ : "=a" (last) \ : [next] "S" (next), [prev] "D" (prev), \ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)),\ [tif_fork] "i" (TIF_FORK), \ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ : "memory", "cc" __EXTRA_CLOBBER) extern void load_gs_index(unsigned); /* * Load a segment. Fall back on loading the zero * segment if something goes wrong.. */ #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ "movl %k0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ "movl %1,%%" #seg "\n\t" \ "jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n\t" \ ".align 8\n\t" \ ".quad 1b,3b\n" \ ".previous" \ : :"r" (value), "r" (0)) #define set_debug(value,register) \ __asm__("movq %0,%%db" #register \ : /* no output */ \ :"r" ((unsigned long) value)) #ifdef __KERNEL__ struct alt_instr { __u8 *instr; /* original instruction */ __u8 *replacement; __u8 cpuid; /* cpuid bit set for replacement */ __u8 instrlen; /* length of original instruction */ __u8 replacementlen; /* length of new instruction, <= instrlen */ __u8 pad[5]; }; #endif /* * Alternative instructions for different CPU types or capabilities. * * This allows to use optimized instructions even on generic binary * kernels. * * length of oldinstr must be longer or equal the length of newinstr * It can be padded with nops as needed. * * For non barrier like inlines please define new variants * without volatile and memory clobber. */ #define alternative(oldinstr, newinstr, feature) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. * * Peculiarities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... 
"r") * If you use variable sized constraints like "m" or "g" in the * replacement make sure to pad to the worst case length. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c0\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" :: "i" (feature), ##input) /* Like alternative_input, but with a single output argument */ #define alternative_io(oldinstr, newinstr, feature, output, input...) \ asm volatile ("661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ " .align 8\n" \ " .quad 661b\n" /* label */ \ " .quad 663f\n" /* new instruction */ \ " .byte %c[feat]\n" /* feature bit */ \ " .byte 662b-661b\n" /* sourcelen */ \ " .byte 664f-663f\n" /* replacementlen */ \ ".previous\n" \ ".section .altinstr_replacement,\"ax\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" : output : [feat] "i" (feature), ##input) /* * Clear and set 'TS' bit respectively */ #define clts() __asm__ __volatile__ ("clts") static inline unsigned long __raw_local_save_flags(void) { unsigned long flags; __asm__ __volatile__( "# __raw_save_flags\n\t" "pushfq ; popq %q0" : "=g" (flags) : /* no input */ : "memory" ); return flags; } #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) static inline unsigned long read_cr0(void) { unsigned long cr0; asm volatile("movq %%cr0,%0" : "=r" (cr0)); return cr0; } static inline void write_cr0(unsigned long val) { asm volatile("movq %0,%%cr0" :: "r" (val)); } static inline unsigned long read_cr3(void) { unsigned long cr3; asm("movq %%cr3,%0" : "=r" (cr3)); return cr3; } static inline unsigned long read_cr4(void) { unsigned long cr4; asm("movq %%cr4,%0" : "=r" (cr4)); return cr4; } static inline void write_cr4(unsigned long val) { asm volatile("movq %0,%%cr4" :: "r" (val)); } #define stts() write_cr0(8 | read_cr0()) #define wbinvd() \ __asm__ __volatile__ ("wbinvd": : :"memory"); /* * On SMP systems, when the scheduler does migration-cost autodetection, * it needs a way to flush as much of the CPU's caches as possible. */ static inline void sched_cacheflush(void) { wbinvd(); } #endif /* __KERNEL__ */ #define nop() __asm__ __volatile__ ("nop") #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) #define tas(ptr) (xchg((ptr),1)) #define __xg(x) ((volatile long *)(x)) static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) { *ptr = val; } #define _set_64bit set_64bit /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. 
--ANK */ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 4: __asm__ __volatile__("xchgl %k0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; case 8: __asm__ __volatile__("xchgq %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory"); break; } return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #define __HAVE_ARCH_CMPXCHG 1 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } #define cmpxchg(ptr,o,n)\ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ (unsigned long)(n),sizeof(*(ptr)))) #ifdef CONFIG_SMP #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() #define smp_read_barrier_depends() do {} while(0) #else #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() #define smp_read_barrier_depends() do {} while(0) #endif /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking * to devices. */ #define mb() asm volatile("mfence":::"memory") #define rmb() asm volatile("lfence":::"memory") #ifdef CONFIG_UNORDERED_IO #define wmb() asm volatile("sfence" ::: "memory") #else #define wmb() asm volatile("" ::: "memory") #endif #define read_barrier_depends() do {} while(0) #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) #define set_wmb(var, value) do { var = value; wmb(); } while (0) #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) /* interrupt control.. 
*/ #define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0) #define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc") #ifdef CONFIG_X86_VSMP /* Interrupt control for VSMP architecture */ #define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0) #define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0) #define irqs_disabled() \ ({ \ unsigned long flags; \ local_save_flags(flags); \ (flags & (1<<18)) || !(flags & (1<<9)); \ }) /* For spinlocks etc */ #define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0) #else /* CONFIG_X86_VSMP */ #define local_irq_disable() __asm__ __volatile__("cli": : :"memory") #define local_irq_enable() __asm__ __volatile__("sti": : :"memory") #define irqs_disabled() \ ({ \ unsigned long flags; \ local_save_flags(flags); \ !(flags & (1<<9)); \ }) /* For spinlocks etc */ #define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) #endif /* used in the idle loop; sti takes one instruction cycle to complete */ #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") /* used when interrupts are already enabled or to shutdown the processor */ #define halt() __asm__ __volatile__("hlt": : :"memory") void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); #define AT_VECTOR_SIZE_ARCH 2 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/tce.h000066400000000000000000000032571314037446600240600ustar00rootroot00000000000000/* * This file is derived from asm-powerpc/tce.h. * * Copyright (C) IBM Corporation, 2006 * * Author: Muli Ben-Yehuda * Author: Jon Mason * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_X86_64_TCE_H #define _ASM_X86_64_TCE_H extern unsigned int specified_table_size; struct iommu_table; #define TCE_ENTRY_SIZE 8 /* in bytes */ #define TCE_READ_SHIFT 0 #define TCE_WRITE_SHIFT 1 #define TCE_HUBID_SHIFT 2 /* unused */ #define TCE_RSVD_SHIFT 8 /* unused */ #define TCE_RPN_SHIFT 12 #define TCE_UNUSED_SHIFT 48 /* unused */ #define TCE_RPN_MASK 0x0000fffffffff000ULL extern void tce_build(struct iommu_table *tbl, unsigned long index, unsigned int npages, unsigned long uaddr, int direction); extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); extern void* alloc_tce_table(void); extern void free_tce_table(void *tbl); extern int build_tce_table(struct pci_dev *dev, void __iomem *bbar); #endif /* _ASM_X86_64_TCE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/termbits.h000066400000000000000000000073531314037446600251370ustar00rootroot00000000000000#ifndef __ARCH_X8664_TERMBITS_H__ #define __ARCH_X8664_TERMBITS_H__ #include typedef unsigned char cc_t; typedef unsigned int speed_t; typedef unsigned int tcflag_t; #define NCCS 19 struct termios { tcflag_t c_iflag; /* input mode flags */ tcflag_t c_oflag; /* output mode flags */ tcflag_t c_cflag; /* control mode flags */ tcflag_t c_lflag; /* local mode flags */ cc_t c_line; /* line discipline */ cc_t c_cc[NCCS]; /* control characters */ }; /* c_cc characters */ #define VINTR 0 #define VQUIT 1 #define VERASE 2 #define VKILL 3 #define VEOF 4 #define VTIME 5 #define VMIN 6 #define VSWTC 7 #define VSTART 8 #define VSTOP 9 #define VSUSP 10 #define VEOL 11 #define VREPRINT 12 #define VDISCARD 13 #define VWERASE 14 #define VLNEXT 15 #define VEOL2 16 /* c_iflag bits */ #define IGNBRK 0000001 #define BRKINT 0000002 #define IGNPAR 0000004 #define PARMRK 0000010 #define INPCK 0000020 #define ISTRIP 0000040 #define INLCR 0000100 #define IGNCR 0000200 #define ICRNL 0000400 #define IUCLC 0001000 #define IXON 0002000 #define IXANY 0004000 #define IXOFF 0010000 #define IMAXBEL 0020000 #define IUTF8 0040000 /* c_oflag bits */ #define OPOST 0000001 #define OLCUC 0000002 #define ONLCR 0000004 #define OCRNL 0000010 #define ONOCR 0000020 #define ONLRET 0000040 #define OFILL 0000100 #define OFDEL 0000200 #define NLDLY 0000400 #define NL0 0000000 #define NL1 0000400 #define CRDLY 0003000 #define CR0 0000000 #define CR1 0001000 #define CR2 0002000 #define CR3 0003000 #define TABDLY 0014000 #define TAB0 0000000 #define TAB1 0004000 #define TAB2 0010000 #define TAB3 0014000 #define XTABS 0014000 #define BSDLY 0020000 #define BS0 0000000 #define BS1 0020000 #define VTDLY 0040000 #define VT0 0000000 #define VT1 0040000 #define FFDLY 0100000 #define FF0 0000000 #define FF1 0100000 /* c_cflag bit meaning */ #define CBAUD 0010017 #define B0 0000000 /* hang up */ #define B50 0000001 #define B75 0000002 #define B110 0000003 #define B134 0000004 #define B150 0000005 #define B200 0000006 #define B300 0000007 #define B600 0000010 #define B1200 0000011 #define B1800 0000012 #define B2400 0000013 #define B4800 0000014 #define B9600 0000015 #define B19200 0000016 #define B38400 0000017 #define EXTA B19200 #define EXTB B38400 #define CSIZE 0000060 #define CS5 0000000 #define CS6 0000020 #define CS7 0000040 #define CS8 0000060 #define CSTOPB 0000100 #define CREAD 0000200 #define PARENB 0000400 #define PARODD 0001000 
#define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 #define B460800 0010004 #define B500000 0010005 #define B576000 0010006 #define B921600 0010007 #define B1000000 0010010 #define B1152000 0010011 #define B1500000 0010012 #define B2000000 0010013 #define B2500000 0010014 #define B3000000 0010015 #define B3500000 0010016 #define B4000000 0010017 #define CIBAUD 002003600000 /* input baud rate (not used) */ #define CMSPAR 010000000000 /* mark or space (stick) parity */ #define CRTSCTS 020000000000 /* flow control */ /* c_lflag bits */ #define ISIG 0000001 #define ICANON 0000002 #define XCASE 0000004 #define ECHO 0000010 #define ECHOE 0000020 #define ECHOK 0000040 #define ECHONL 0000100 #define NOFLSH 0000200 #define TOSTOP 0000400 #define ECHOCTL 0001000 #define ECHOPRT 0002000 #define ECHOKE 0004000 #define FLUSHO 0010000 #define PENDIN 0040000 #define IEXTEN 0100000 /* tcflow() and TCXONC use these */ #define TCOOFF 0 #define TCOON 1 #define TCIOFF 2 #define TCION 3 /* tcflush() and TCFLSH use these */ #define TCIFLUSH 0 #define TCOFLUSH 1 #define TCIOFLUSH 2 /* tcsetattr uses these */ #define TCSANOW 0 #define TCSADRAIN 1 #define TCSAFLUSH 2 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/termios.h000066400000000000000000000061241314037446600247630ustar00rootroot00000000000000#ifndef _X8664_TERMIOS_H #define _X8664_TERMIOS_H #include #include struct winsize { unsigned short ws_row; unsigned short ws_col; unsigned short ws_xpixel; unsigned short ws_ypixel; }; #define NCC 8 struct termio { unsigned short c_iflag; /* input mode flags */ unsigned short c_oflag; /* output mode flags */ unsigned short c_cflag; /* control mode flags */ unsigned short c_lflag; /* local mode flags */ unsigned char c_line; /* line discipline */ unsigned char c_cc[NCC]; /* control characters */ }; /* modem lines */ #define TIOCM_LE 0x001 #define TIOCM_DTR 0x002 #define TIOCM_RTS 0x004 #define TIOCM_ST 0x008 #define TIOCM_SR 0x010 #define TIOCM_CTS 0x020 #define TIOCM_CAR 0x040 #define TIOCM_RNG 0x080 #define TIOCM_DSR 0x100 #define TIOCM_CD TIOCM_CAR #define TIOCM_RI TIOCM_RNG #define TIOCM_OUT1 0x2000 #define TIOCM_OUT2 0x4000 #define TIOCM_LOOP 0x8000 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 #define N_MOUSE 2 #define N_PPP 3 #define N_STRIP 4 #define N_AX25 5 #define N_X25 6 /* X.25 async */ #define N_6PACK 7 #define N_MASC 8 /* Reserved for Mobitex module */ #define N_R3964 9 /* Reserved for Simatic R3964 module */ #define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ #define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ #define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ #define N_HDLC 13 /* synchronous HDLC */ #define N_SYNC_PPP 14 /* synchronous PPP */ #define N_HCI 15 /* Bluetooth HCI UART */ #ifdef __KERNEL__ /* intr=^C quit=^\ erase=del kill=^U eof=^D vtime=\0 vmin=\1 sxtc=\0 start=^Q stop=^S susp=^Z eol=\0 reprint=^R discard=^U werase=^W lnext=^V eol2=\0 */ #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" /* * Translate a "termio" structure into a "termios". Ugh. 
*/ #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ unsigned short __tmp; \ get_user(__tmp,&(termio)->x); \ *(unsigned short *) &(termios)->x = __tmp; \ } #define user_termio_to_kernel_termios(termios, termio) \ ({ \ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ }) /* * Translate a "termios" structure into a "termio". Ugh. */ #define kernel_termios_to_user_termio(termio, termios) \ ({ \ put_user((termios)->c_iflag, &(termio)->c_iflag); \ put_user((termios)->c_oflag, &(termio)->c_oflag); \ put_user((termios)->c_cflag, &(termio)->c_cflag); \ put_user((termios)->c_lflag, &(termio)->c_lflag); \ put_user((termios)->c_line, &(termio)->c_line); \ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ }) #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) #endif /* __KERNEL__ */ #endif /* _X8664_TERMIOS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/thread_info.h000066400000000000000000000105421314037446600255620ustar00rootroot00000000000000/* thread_info.h: x86_64 low-level thread information * * Copyright (C) 2002 David Howells (dhowells@redhat.com) * - Incorporating suggestions made by Linus Torvalds and Dave Miller */ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H #ifdef __KERNEL__ #include #include #include /* * low level task data that entry.S needs immediate access to * - this struct should fit entirely inside of one cache line * - this struct shares the supervisor stack pages */ #ifndef __ASSEMBLY__ struct task_struct; struct exec_domain; #include struct thread_info { struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; struct restart_block restart_block; }; #endif /* * macros/functions for gaining access to the thread information structure * preempt_count needs to be 1 initially, until the scheduler is functional. 
*/ #ifndef __ASSEMBLY__ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); return ti; } /* do not use in interrupt context */ static inline struct thread_info *stack_thread_info(void) { struct thread_info *ti; __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); return ti; } /* thread information allocation */ #define alloc_thread_info(tsk) \ ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) #else /* !__ASSEMBLY__ */ /* how to get the thread information struct from ASM */ #define GET_THREAD_INFO(reg) \ movq %gs:pda_kernelstack,reg ; \ subq $(THREAD_SIZE-PDA_STACKOFFSET),reg #endif /* * thread information flags * - these are process state flags that various assembly files may need to access * - pending work-to-be-done flags are in LSW * - other flags in MSW * Warning: layout of LSW is hardcoded in entry.S */ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ #define TIF_IRET 5 /* force IRET */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_IA32 17 /* 32bit process */ #define TIF_FORK 18 /* ret_from_fork */ #define TIF_ABI_PENDING 19 #define TIF_MEMDIE 20 #define _TIF_SYSCALL_TRACE (1< #include #include #include #include #include #include #define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */ typedef unsigned long long cycles_t; static inline cycles_t get_cycles (void) { unsigned long long ret; rdtscll(ret); return ret; } /* * Stop RDTSC speculation. This is needed when you need to use RDTSC * (or get_cycles or vread that possibly accesses the TSC) in a defined * code region. * * (Could use an alternative three way for this if there was one.) */ static inline void rdtsc_barrier(void) { alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); } /* Like get_cycles, but make sure the CPU is synchronized. 
*/ static __always_inline cycles_t get_cycles_sync(void) { unsigned long long ret; rdtsc_barrier(); rdtscll(ret); rdtsc_barrier(); return ret; } extern unsigned int cpu_khz; extern int read_current_timer(unsigned long *timer_value); #define ARCH_HAS_READ_CURRENT_TIMER 1 extern struct vxtime_data vxtime; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/tlb.h000066400000000000000000000004321314037446600240560ustar00rootroot00000000000000#ifndef TLB_H #define TLB_H 1 #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/tlbflush.h000066400000000000000000000066531314037446600251330ustar00rootroot00000000000000#ifndef _X8664_TLBFLUSH_H #define _X8664_TLBFLUSH_H #include #include #include #define __flush_tlb() \ do { \ unsigned long tmpreg; \ \ __asm__ __volatile__( \ "movq %%cr3, %0; # flush TLB \n" \ "movq %0, %%cr3; \n" \ : "=r" (tmpreg) \ :: "memory"); \ } while (0) /* * Global pages have to be flushed a bit differently. Not a real * performance problem because this does not happen often. */ #define __flush_tlb_global() \ do { \ unsigned long tmpreg, cr4, cr4_orig; \ \ __asm__ __volatile__( \ "movq %%cr4, %2; # turn off PGE \n" \ "movq %2, %1; \n" \ "andq %3, %1; \n" \ "movq %1, %%cr4; \n" \ "movq %%cr3, %0; # flush TLB \n" \ "movq %0, %%cr3; \n" \ "movq %2, %%cr4; # turn PGE back on \n" \ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ : "i" (~X86_CR4_PGE) \ : "memory"); \ } while (0) extern unsigned long pgkern_mask; #define __flush_tlb_all() __flush_tlb_global() #define __flush_tlb_one(addr) \ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) /* * TLB flushing: * * - flush_tlb() flushes the current mm struct TLBs * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables * * x86-64 can only flush individual pages or full VMs. For a range flush * we always do the full VM. Might be worth trying if for a small * range a few INVLPGs in a row are a win. */ #ifndef CONFIG_SMP #define flush_tlb() __flush_tlb() #define flush_tlb_all() __flush_tlb_all() #define local_flush_tlb() __flush_tlb() static inline void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) __flush_tlb(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) __flush_tlb_one(addr); } static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (vma->vm_mm == current->active_mm) __flush_tlb(); } #else #include #define local_flush_tlb() \ __flush_tlb() extern void flush_tlb_all(void); extern void flush_tlb_current_task(void); extern void flush_tlb_mm(struct mm_struct *); extern void flush_tlb_page(struct vm_area_struct *, unsigned long); #define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) { flush_tlb_mm(vma->vm_mm); } #define TLBSTATE_OK 1 #define TLBSTATE_LAZY 2 /* Roughly an IPI every 20MB with 4k pages for freeing page table ranges. 
Cost is about 42k of memory for each CPU. */ #define ARCH_FREE_PTE_NR 5350 #endif #define flush_tlb_kernel_range(start, end) flush_tlb_all() static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) { /* x86_64 does not keep any page table caches in a software TLB. The CPUs do in their hardware TLBs, but they are handled by the normal TLB flushing algorithms. */ } #endif /* _X8664_TLBFLUSH_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/topology.h000066400000000000000000000036411314037446600251560ustar00rootroot00000000000000#ifndef _ASM_X86_64_TOPOLOGY_H #define _ASM_X86_64_TOPOLOGY_H #include #ifdef CONFIG_NUMA #include #include /* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */ extern cpumask_t cpu_online_map; extern unsigned char cpu_to_node[]; extern cpumask_t node_to_cpumask[]; #ifdef CONFIG_ACPI_NUMA extern int __node_distance(int, int); #define node_distance(a,b) __node_distance(a,b) /* #else fallback version */ #endif #define cpu_to_node(cpu) (cpu_to_node[cpu]) #define parent_node(node) (node) #define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node])) #define node_to_cpumask(node) (node_to_cpumask[node]) #define pcibus_to_node(bus) ((long)(bus->sysdata)) #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); #define numa_node_id() read_pda(nodenumber) /* sched_domains SD_NODE_INIT for x86_64 machines */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ .parent = NULL, \ .groups = NULL, \ .min_interval = 8, \ .max_interval = 32, \ .busy_factor = 32, \ .imbalance_pct = 125, \ .cache_nice_tries = 2, \ .busy_idx = 3, \ .idle_idx = 2, \ .newidle_idx = 0, \ .wake_idx = 1, \ .forkexec_idx = 1, \ .per_cpu_gain = 100, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ .nr_balance_failed = 0, \ } #endif #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) \ (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) #define topology_core_id(cpu) \ (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu]) #define topology_core_siblings(cpu) (cpu_core_map[cpu]) #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) #endif #include extern cpumask_t cpu_coregroup_map(int cpu); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/types.h000066400000000000000000000020001314037446600244320ustar00rootroot00000000000000#ifndef _X86_64_TYPES_H #define _X86_64_TYPES_H #ifndef __ASSEMBLY__ typedef unsigned short umode_t; /* * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the * header files exported to user space */ typedef __signed__ char __s8; typedef unsigned char __u8; typedef __signed__ short __s16; typedef unsigned short __u16; typedef __signed__ int __s32; typedef unsigned int __u32; typedef __signed__ long long __s64; typedef unsigned long long __u64; #endif /* __ASSEMBLY__ */ /* * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ #define BITS_PER_LONG 64 #ifndef __ASSEMBLY__ typedef signed char s8; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef signed long long s64; typedef unsigned long long u64; typedef u64 dma64_addr_t; typedef u64 dma_addr_t; typedef u64 sector_t; #define HAVE_SECTOR_T #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/uaccess.h000066400000000000000000000263741314037446600247400ustar00rootroot00000000000000#ifndef __X86_64_UACCESS_H #define __X86_64_UACCESS_H /* * User space memory access functions */ #include #include #include #include #include #include #define VERIFY_READ 0 #define VERIFY_WRITE 1 /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with * get_fs() == KERNEL_DS, checking is bypassed. * * For historical reasons, these macros are grossly misnamed. */ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL) #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a,b) ((a).seg == (b).seg) #define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg))) /* * Uhhuh, this needs 65-bit arithmetic. We have a carry.. */ #define __range_not_ok(addr,size) ({ \ unsigned long flag,sum; \ __chk_user_ptr(addr); \ asm("# range_ok\n\r" \ "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ :"=&r" (flag), "=r" (sum) \ :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \ flag; }) #define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is * the address at which the program should continue. No registers are * modified, so it is entirely up to the continuation code to figure out * what to do. * * All the routines below use bits of fixup code that are out of line * with the main instruction path. This means when everything is well, * we don't even have to jump over them. Further, they do not intrude * on our cache or tlb entries. */ struct exception_table_entry { unsigned long insn, fixup; }; #define ARCH_HAS_SEARCH_EXTABLE /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * * This gets kind of ugly. We want to return _two_ values in "get_user()" * and yet we don't want to do any pointers, because that is too much * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. 
* * The "__xxx" versions of the user access functions are versions that * do not verify the address space, that must have been done previously * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). */ #define __get_user_x(size,ret,x,ptr) \ __asm__ __volatile__("call __get_user_" #size \ :"=a" (ret),"=d" (x) \ :"c" (ptr) \ :"r8") /* Careful: we have to cast the result to the type of the pointer for sign reasons */ #define get_user(x,ptr) \ ({ unsigned long __val_gu; \ int __ret_gu; \ __chk_user_ptr(ptr); \ switch(sizeof (*(ptr))) { \ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ default: __get_user_bad(); break; \ } \ (x) = (__typeof__(*(ptr)))__val_gu; \ __ret_gu; \ }) extern void __put_user_1(void); extern void __put_user_2(void); extern void __put_user_4(void); extern void __put_user_8(void); extern void __put_user_bad(void); #define __put_user_x(size,ret,x,ptr) \ __asm__ __volatile__("call __put_user_" #size \ :"=a" (ret) \ :"c" (ptr),"d" (x) \ :"r8") #define put_user(x,ptr) \ __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __get_user(x,ptr) \ __get_user_nocheck((x),(ptr),sizeof(*(ptr))) #define __put_user(x,ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user #define __put_user_nocheck(x,ptr,size) \ ({ \ int __pu_err; \ __put_user_size((x),(ptr),(size),__pu_err); \ __pu_err; \ }) #define __put_user_check(x,ptr,size) \ ({ \ int __pu_err; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ switch (size) { \ case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ default: __put_user_bad(); \ } \ __pu_err; \ }) #define __put_user_size(x,ptr,size,retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\ case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\ case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\ case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\ default: __put_user_bad(); \ } \ } while (0) /* FIXME: this hack is definitely wrong -AK */ struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) /* * Tell gcc we read from memory instead of writing: this is because * we do not write to any memory gcc knows about, so there are no * aliasing issues. 
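 *
 * For orientation, a rough sketch (not code from this file) of what the page
 * fault handler does when one of the "1:" instructions in the macros below
 * faults on a bad user pointer: it looks the faulting address up in the
 * __ex_table section these macros emit and resumes at the fixup label
 * instead of oopsing, approximately:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(regs->rip);
 *	if (fixup) {
 *		regs->rip = fixup->fixup;
 *		return;
 *	}
 *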
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ __asm__ __volatile__( \ "1: mov"itype" %"rtype"1,%2\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 8\n" \ " .quad 1b,3b\n" \ ".previous" \ : "=r"(err) \ : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) #define __get_user_nocheck(x,ptr,size) \ ({ \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern int __get_user_1(void); extern int __get_user_2(void); extern int __get_user_4(void); extern int __get_user_8(void); extern int __get_user_bad(void); #define __get_user_size(x,ptr,size,retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\ case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\ case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\ case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\ default: (x) = __get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ __asm__ __volatile__( \ "1: mov"itype" %2,%"rtype"1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ " xor"itype" %"rtype"1,%"rtype"1\n" \ " jmp 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 8\n" \ " .quad 1b,3b\n" \ ".previous" \ : "=r"(err), ltype (x) \ : "m"(__m(addr)), "i"(errno), "0"(err)) /* * Copy To/From Userspace */ /* Handles exceptions in both to and from, but doesn't do access_ok */ extern unsigned long copy_user_generic(void *to, const void *from, unsigned len); extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len); extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len); extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size) { int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic(dst,(__force void *)src,size); switch (size) { case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); return ret; case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2); return ret; case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4); return ret; case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8); return ret; case 10: __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); if (unlikely(ret)) return ret; __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2); return ret; case 16: __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); if (unlikely(ret)) return ret; __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8); return ret; default: return copy_user_generic(dst,(__force void *)src,size); } } static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size) { int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst,src,size); switch (size) { case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); return ret; case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2); return ret; case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4); return ret; case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8); return 
ret; case 10: __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10); if (unlikely(ret)) return ret; asm("":::"memory"); __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2); return ret; case 16: __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16); if (unlikely(ret)) return ret; asm("":::"memory"); __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8); return ret; default: return copy_user_generic((__force void *)dst,src,size); } } static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size) { int ret = 0; if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst,(__force void *)src,size); switch (size) { case 1: { u8 tmp; __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); if (likely(!ret)) __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); return ret; } case 2: { u16 tmp; __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); if (likely(!ret)) __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); return ret; } case 4: { u32 tmp; __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); if (likely(!ret)) __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); return ret; } case 8: { u64 tmp; __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); if (likely(!ret)) __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); return ret; } default: return copy_user_generic((__force void *)dst,(__force void *)src,size); } } long strncpy_from_user(char *dst, const char __user *src, long count); long __strncpy_from_user(char *dst, const char __user *src, long count); long strnlen_user(const char __user *str, long n); long __strnlen_user(const char __user *str, long n); long strlen_user(const char __user *str); unsigned long clear_user(void __user *mem, unsigned long len); unsigned long __clear_user(void __user *mem, unsigned long len); #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user #endif /* __X86_64_UACCESS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/ucontext.h000066400000000000000000000003771314037446600251560ustar00rootroot00000000000000#ifndef _ASMX8664_UCONTEXT_H #define _ASMX8664_UCONTEXT_H struct ucontext { unsigned long uc_flags; struct ucontext *uc_link; stack_t uc_stack; struct sigcontext uc_mcontext; sigset_t uc_sigmask; /* mask last for extensibility */ }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/unaligned.h000066400000000000000000000022531314037446600252460ustar00rootroot00000000000000#ifndef __X8664_UNALIGNED_H #define __X8664_UNALIGNED_H /* * The x86-64 can do unaligned accesses itself. * * The strange macros are there to make sure these can't * be misused in a way that makes them not work on other * architectures where unaligned accesses aren't as simple. */ /** * get_unaligned - get value from possibly mis-aligned location * @ptr: pointer to value * * This macro should be used for accessing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. retrieving a u16 value from a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define get_unaligned(ptr) (*(ptr)) /** * put_unaligned - put value to a possibly mis-aligned location * @val: value to place * @ptr: pointer to location * * This macro should be used for placing values larger in size than * single bytes at locations that are expected to be improperly aligned, * e.g. 
writing a u16 value to a location not u16-aligned. * * Note that unaligned accesses can be very expensive on some architectures. */ #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/unistd.h000066400000000000000000000722741314037446600246200ustar00rootroot00000000000000#ifndef _ASM_X86_64_UNISTD_H_ #define _ASM_X86_64_UNISTD_H_ #ifndef __SYSCALL #define __SYSCALL(a,b) #endif /* * This file contains the system call numbers. * * Note: holes are not allowed. */ /* at least 8 syscall per cacheline */ #define __NR_read 0 __SYSCALL(__NR_read, sys_read) #define __NR_write 1 __SYSCALL(__NR_write, sys_write) #define __NR_open 2 __SYSCALL(__NR_open, sys_open) #define __NR_close 3 __SYSCALL(__NR_close, sys_close) #define __NR_stat 4 __SYSCALL(__NR_stat, sys_newstat) #define __NR_fstat 5 __SYSCALL(__NR_fstat, sys_newfstat) #define __NR_lstat 6 __SYSCALL(__NR_lstat, sys_newlstat) #define __NR_poll 7 __SYSCALL(__NR_poll, sys_poll) #define __NR_lseek 8 __SYSCALL(__NR_lseek, sys_lseek) #define __NR_mmap 9 __SYSCALL(__NR_mmap, sys_mmap) #define __NR_mprotect 10 __SYSCALL(__NR_mprotect, sys_mprotect) #define __NR_munmap 11 __SYSCALL(__NR_munmap, sys_munmap) #define __NR_brk 12 __SYSCALL(__NR_brk, sys_brk) #define __NR_rt_sigaction 13 __SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) #define __NR_rt_sigprocmask 14 __SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) #define __NR_rt_sigreturn 15 __SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn) #define __NR_ioctl 16 __SYSCALL(__NR_ioctl, sys_ioctl) #define __NR_pread64 17 __SYSCALL(__NR_pread64, sys_pread64) #define __NR_pwrite64 18 __SYSCALL(__NR_pwrite64, sys_pwrite64) #define __NR_readv 19 __SYSCALL(__NR_readv, sys_readv) #define __NR_writev 20 __SYSCALL(__NR_writev, sys_writev) #define __NR_access 21 __SYSCALL(__NR_access, sys_access) #define __NR_pipe 22 __SYSCALL(__NR_pipe, sys_pipe) #define __NR_select 23 __SYSCALL(__NR_select, sys_select) #define __NR_sched_yield 24 __SYSCALL(__NR_sched_yield, sys_sched_yield) #define __NR_mremap 25 __SYSCALL(__NR_mremap, sys_mremap) #define __NR_msync 26 __SYSCALL(__NR_msync, sys_msync) #define __NR_mincore 27 __SYSCALL(__NR_mincore, sys_mincore) #define __NR_madvise 28 __SYSCALL(__NR_madvise, sys_madvise) #define __NR_shmget 29 __SYSCALL(__NR_shmget, sys_shmget) #define __NR_shmat 30 __SYSCALL(__NR_shmat, sys_shmat) #define __NR_shmctl 31 __SYSCALL(__NR_shmctl, sys_shmctl) #define __NR_dup 32 __SYSCALL(__NR_dup, sys_dup) #define __NR_dup2 33 __SYSCALL(__NR_dup2, sys_dup2) #define __NR_pause 34 __SYSCALL(__NR_pause, sys_pause) #define __NR_nanosleep 35 __SYSCALL(__NR_nanosleep, sys_nanosleep) #define __NR_getitimer 36 __SYSCALL(__NR_getitimer, sys_getitimer) #define __NR_alarm 37 __SYSCALL(__NR_alarm, sys_alarm) #define __NR_setitimer 38 __SYSCALL(__NR_setitimer, sys_setitimer) #define __NR_getpid 39 __SYSCALL(__NR_getpid, sys_getpid) #define __NR_sendfile 40 __SYSCALL(__NR_sendfile, sys_sendfile64) #define __NR_socket 41 __SYSCALL(__NR_socket, sys_socket) #define __NR_connect 42 __SYSCALL(__NR_connect, sys_connect) #define __NR_accept 43 __SYSCALL(__NR_accept, sys_accept) #define __NR_sendto 44 __SYSCALL(__NR_sendto, sys_sendto) #define __NR_recvfrom 45 __SYSCALL(__NR_recvfrom, sys_recvfrom) #define __NR_sendmsg 46 __SYSCALL(__NR_sendmsg, sys_sendmsg) #define __NR_recvmsg 47 __SYSCALL(__NR_recvmsg, sys_recvmsg) #define __NR_shutdown 48 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_bind 49 __SYSCALL(__NR_bind, sys_bind) #define __NR_listen 
50 __SYSCALL(__NR_listen, sys_listen) #define __NR_getsockname 51 __SYSCALL(__NR_getsockname, sys_getsockname) #define __NR_getpeername 52 __SYSCALL(__NR_getpeername, sys_getpeername) #define __NR_socketpair 53 __SYSCALL(__NR_socketpair, sys_socketpair) #define __NR_setsockopt 54 __SYSCALL(__NR_setsockopt, sys_setsockopt) #define __NR_getsockopt 55 __SYSCALL(__NR_getsockopt, sys_getsockopt) #define __NR_clone 56 __SYSCALL(__NR_clone, stub_clone) #define __NR_fork 57 __SYSCALL(__NR_fork, stub_fork) #define __NR_vfork 58 __SYSCALL(__NR_vfork, stub_vfork) #define __NR_execve 59 __SYSCALL(__NR_execve, stub_execve) #define __NR_exit 60 __SYSCALL(__NR_exit, sys_exit) #define __NR_wait4 61 __SYSCALL(__NR_wait4, sys_wait4) #define __NR_kill 62 __SYSCALL(__NR_kill, sys_kill) #define __NR_uname 63 __SYSCALL(__NR_uname, sys_uname) #define __NR_semget 64 __SYSCALL(__NR_semget, sys_semget) #define __NR_semop 65 __SYSCALL(__NR_semop, sys_semop) #define __NR_semctl 66 __SYSCALL(__NR_semctl, sys_semctl) #define __NR_shmdt 67 __SYSCALL(__NR_shmdt, sys_shmdt) #define __NR_msgget 68 __SYSCALL(__NR_msgget, sys_msgget) #define __NR_msgsnd 69 __SYSCALL(__NR_msgsnd, sys_msgsnd) #define __NR_msgrcv 70 __SYSCALL(__NR_msgrcv, sys_msgrcv) #define __NR_msgctl 71 __SYSCALL(__NR_msgctl, sys_msgctl) #define __NR_fcntl 72 __SYSCALL(__NR_fcntl, sys_fcntl) #define __NR_flock 73 __SYSCALL(__NR_flock, sys_flock) #define __NR_fsync 74 __SYSCALL(__NR_fsync, sys_fsync) #define __NR_fdatasync 75 __SYSCALL(__NR_fdatasync, sys_fdatasync) #define __NR_truncate 76 __SYSCALL(__NR_truncate, sys_truncate) #define __NR_ftruncate 77 __SYSCALL(__NR_ftruncate, sys_ftruncate) #define __NR_getdents 78 __SYSCALL(__NR_getdents, sys_getdents) #define __NR_getcwd 79 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_chdir 80 __SYSCALL(__NR_chdir, sys_chdir) #define __NR_fchdir 81 __SYSCALL(__NR_fchdir, sys_fchdir) #define __NR_rename 82 __SYSCALL(__NR_rename, sys_rename) #define __NR_mkdir 83 __SYSCALL(__NR_mkdir, sys_mkdir) #define __NR_rmdir 84 __SYSCALL(__NR_rmdir, sys_rmdir) #define __NR_creat 85 __SYSCALL(__NR_creat, sys_creat) #define __NR_link 86 __SYSCALL(__NR_link, sys_link) #define __NR_unlink 87 __SYSCALL(__NR_unlink, sys_unlink) #define __NR_symlink 88 __SYSCALL(__NR_symlink, sys_symlink) #define __NR_readlink 89 __SYSCALL(__NR_readlink, sys_readlink) #define __NR_chmod 90 __SYSCALL(__NR_chmod, sys_chmod) #define __NR_fchmod 91 __SYSCALL(__NR_fchmod, sys_fchmod) #define __NR_chown 92 __SYSCALL(__NR_chown, sys_chown) #define __NR_fchown 93 __SYSCALL(__NR_fchown, sys_fchown) #define __NR_lchown 94 __SYSCALL(__NR_lchown, sys_lchown) #define __NR_umask 95 __SYSCALL(__NR_umask, sys_umask) #define __NR_gettimeofday 96 __SYSCALL(__NR_gettimeofday, sys_gettimeofday) #define __NR_getrlimit 97 __SYSCALL(__NR_getrlimit, sys_getrlimit) #define __NR_getrusage 98 __SYSCALL(__NR_getrusage, sys_getrusage) #define __NR_sysinfo 99 __SYSCALL(__NR_sysinfo, sys_sysinfo) #define __NR_times 100 __SYSCALL(__NR_times, sys_times) #define __NR_ptrace 101 __SYSCALL(__NR_ptrace, sys_ptrace) #define __NR_getuid 102 __SYSCALL(__NR_getuid, sys_getuid) #define __NR_syslog 103 __SYSCALL(__NR_syslog, sys_syslog) /* at the very end the stuff that never runs during the benchmarks */ #define __NR_getgid 104 __SYSCALL(__NR_getgid, sys_getgid) #define __NR_setuid 105 __SYSCALL(__NR_setuid, sys_setuid) #define __NR_setgid 106 __SYSCALL(__NR_setgid, sys_setgid) #define __NR_geteuid 107 __SYSCALL(__NR_geteuid, sys_geteuid) #define __NR_getegid 108 __SYSCALL(__NR_getegid, 
sys_getegid) #define __NR_setpgid 109 __SYSCALL(__NR_setpgid, sys_setpgid) #define __NR_getppid 110 __SYSCALL(__NR_getppid, sys_getppid) #define __NR_getpgrp 111 __SYSCALL(__NR_getpgrp, sys_getpgrp) #define __NR_setsid 112 __SYSCALL(__NR_setsid, sys_setsid) #define __NR_setreuid 113 __SYSCALL(__NR_setreuid, sys_setreuid) #define __NR_setregid 114 __SYSCALL(__NR_setregid, sys_setregid) #define __NR_getgroups 115 __SYSCALL(__NR_getgroups, sys_getgroups) #define __NR_setgroups 116 __SYSCALL(__NR_setgroups, sys_setgroups) #define __NR_setresuid 117 __SYSCALL(__NR_setresuid, sys_setresuid) #define __NR_getresuid 118 __SYSCALL(__NR_getresuid, sys_getresuid) #define __NR_setresgid 119 __SYSCALL(__NR_setresgid, sys_setresgid) #define __NR_getresgid 120 __SYSCALL(__NR_getresgid, sys_getresgid) #define __NR_getpgid 121 __SYSCALL(__NR_getpgid, sys_getpgid) #define __NR_setfsuid 122 __SYSCALL(__NR_setfsuid, sys_setfsuid) #define __NR_setfsgid 123 __SYSCALL(__NR_setfsgid, sys_setfsgid) #define __NR_getsid 124 __SYSCALL(__NR_getsid, sys_getsid) #define __NR_capget 125 __SYSCALL(__NR_capget, sys_capget) #define __NR_capset 126 __SYSCALL(__NR_capset, sys_capset) #define __NR_rt_sigpending 127 __SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) #define __NR_rt_sigtimedwait 128 __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) #define __NR_rt_sigqueueinfo 129 __SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) #define __NR_rt_sigsuspend 130 __SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) #define __NR_sigaltstack 131 __SYSCALL(__NR_sigaltstack, stub_sigaltstack) #define __NR_utime 132 __SYSCALL(__NR_utime, sys_utime) #define __NR_mknod 133 __SYSCALL(__NR_mknod, sys_mknod) /* Only needed for a.out */ #define __NR_uselib 134 __SYSCALL(__NR_uselib, sys_ni_syscall) #define __NR_personality 135 __SYSCALL(__NR_personality, sys_personality) #define __NR_ustat 136 __SYSCALL(__NR_ustat, sys_ustat) #define __NR_statfs 137 __SYSCALL(__NR_statfs, sys_statfs) #define __NR_fstatfs 138 __SYSCALL(__NR_fstatfs, sys_fstatfs) #define __NR_sysfs 139 __SYSCALL(__NR_sysfs, sys_sysfs) #define __NR_getpriority 140 __SYSCALL(__NR_getpriority, sys_getpriority) #define __NR_setpriority 141 __SYSCALL(__NR_setpriority, sys_setpriority) #define __NR_sched_setparam 142 __SYSCALL(__NR_sched_setparam, sys_sched_setparam) #define __NR_sched_getparam 143 __SYSCALL(__NR_sched_getparam, sys_sched_getparam) #define __NR_sched_setscheduler 144 __SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) #define __NR_sched_getscheduler 145 __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) #define __NR_sched_get_priority_max 146 __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) #define __NR_sched_get_priority_min 147 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) #define __NR_sched_rr_get_interval 148 __SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) #define __NR_mlock 149 __SYSCALL(__NR_mlock, sys_mlock) #define __NR_munlock 150 __SYSCALL(__NR_munlock, sys_munlock) #define __NR_mlockall 151 __SYSCALL(__NR_mlockall, sys_mlockall) #define __NR_munlockall 152 __SYSCALL(__NR_munlockall, sys_munlockall) #define __NR_vhangup 153 __SYSCALL(__NR_vhangup, sys_vhangup) #define __NR_modify_ldt 154 __SYSCALL(__NR_modify_ldt, sys_modify_ldt) #define __NR_pivot_root 155 __SYSCALL(__NR_pivot_root, sys_pivot_root) #define __NR__sysctl 156 __SYSCALL(__NR__sysctl, sys_sysctl) #define __NR_prctl 157 __SYSCALL(__NR_prctl, sys_prctl) #define __NR_arch_prctl 158 __SYSCALL(__NR_arch_prctl, sys_arch_prctl) 
#define __NR_adjtimex 159 __SYSCALL(__NR_adjtimex, sys_adjtimex) #define __NR_setrlimit 160 __SYSCALL(__NR_setrlimit, sys_setrlimit) #define __NR_chroot 161 __SYSCALL(__NR_chroot, sys_chroot) #define __NR_sync 162 __SYSCALL(__NR_sync, sys_sync) #define __NR_acct 163 __SYSCALL(__NR_acct, sys_acct) #define __NR_settimeofday 164 __SYSCALL(__NR_settimeofday, sys_settimeofday) #define __NR_mount 165 __SYSCALL(__NR_mount, sys_mount) #define __NR_umount2 166 __SYSCALL(__NR_umount2, sys_umount) #define __NR_swapon 167 __SYSCALL(__NR_swapon, sys_swapon) #define __NR_swapoff 168 __SYSCALL(__NR_swapoff, sys_swapoff) #define __NR_reboot 169 __SYSCALL(__NR_reboot, sys_reboot) #define __NR_sethostname 170 __SYSCALL(__NR_sethostname, sys_sethostname) #define __NR_setdomainname 171 __SYSCALL(__NR_setdomainname, sys_setdomainname) #define __NR_iopl 172 __SYSCALL(__NR_iopl, stub_iopl) #define __NR_ioperm 173 __SYSCALL(__NR_ioperm, sys_ioperm) #define __NR_create_module 174 __SYSCALL(__NR_create_module, sys_ni_syscall) #define __NR_init_module 175 __SYSCALL(__NR_init_module, sys_init_module) #define __NR_delete_module 176 __SYSCALL(__NR_delete_module, sys_delete_module) #define __NR_get_kernel_syms 177 __SYSCALL(__NR_get_kernel_syms, sys_ni_syscall) #define __NR_query_module 178 __SYSCALL(__NR_query_module, sys_ni_syscall) #define __NR_quotactl 179 __SYSCALL(__NR_quotactl, sys_quotactl) #define __NR_nfsservctl 180 __SYSCALL(__NR_nfsservctl, sys_nfsservctl) #define __NR_getpmsg 181 /* reserved for LiS/STREAMS */ __SYSCALL(__NR_getpmsg, sys_ni_syscall) #define __NR_putpmsg 182 /* reserved for LiS/STREAMS */ __SYSCALL(__NR_putpmsg, sys_ni_syscall) #define __NR_afs_syscall 183 /* reserved for AFS */ __SYSCALL(__NR_afs_syscall, sys_ni_syscall) #define __NR_tuxcall 184 /* reserved for tux */ __SYSCALL(__NR_tuxcall, sys_ni_syscall) #define __NR_security 185 __SYSCALL(__NR_security, sys_ni_syscall) #define __NR_gettid 186 __SYSCALL(__NR_gettid, sys_gettid) #define __NR_readahead 187 __SYSCALL(__NR_readahead, sys_readahead) #define __NR_setxattr 188 __SYSCALL(__NR_setxattr, sys_setxattr) #define __NR_lsetxattr 189 __SYSCALL(__NR_lsetxattr, sys_lsetxattr) #define __NR_fsetxattr 190 __SYSCALL(__NR_fsetxattr, sys_fsetxattr) #define __NR_getxattr 191 __SYSCALL(__NR_getxattr, sys_getxattr) #define __NR_lgetxattr 192 __SYSCALL(__NR_lgetxattr, sys_lgetxattr) #define __NR_fgetxattr 193 __SYSCALL(__NR_fgetxattr, sys_fgetxattr) #define __NR_listxattr 194 __SYSCALL(__NR_listxattr, sys_listxattr) #define __NR_llistxattr 195 __SYSCALL(__NR_llistxattr, sys_llistxattr) #define __NR_flistxattr 196 __SYSCALL(__NR_flistxattr, sys_flistxattr) #define __NR_removexattr 197 __SYSCALL(__NR_removexattr, sys_removexattr) #define __NR_lremovexattr 198 __SYSCALL(__NR_lremovexattr, sys_lremovexattr) #define __NR_fremovexattr 199 __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_tkill 200 __SYSCALL(__NR_tkill, sys_tkill) #define __NR_time 201 __SYSCALL(__NR_time, sys_time) #define __NR_futex 202 __SYSCALL(__NR_futex, sys_futex) #define __NR_sched_setaffinity 203 __SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) #define __NR_sched_getaffinity 204 __SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) #define __NR_set_thread_area 205 __SYSCALL(__NR_set_thread_area, sys_ni_syscall) /* use arch_prctl */ #define __NR_io_setup 206 __SYSCALL(__NR_io_setup, sys_io_setup) #define __NR_io_destroy 207 __SYSCALL(__NR_io_destroy, sys_io_destroy) #define __NR_io_getevents 208 __SYSCALL(__NR_io_getevents, sys_io_getevents) #define 
__NR_io_submit 209 __SYSCALL(__NR_io_submit, sys_io_submit) #define __NR_io_cancel 210 __SYSCALL(__NR_io_cancel, sys_io_cancel) #define __NR_get_thread_area 211 __SYSCALL(__NR_get_thread_area, sys_ni_syscall) /* use arch_prctl */ #define __NR_lookup_dcookie 212 __SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) #define __NR_epoll_create 213 __SYSCALL(__NR_epoll_create, sys_epoll_create) #define __NR_epoll_ctl_old 214 __SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall) #define __NR_epoll_wait_old 215 __SYSCALL(__NR_epoll_wait_old, sys_ni_syscall) #define __NR_remap_file_pages 216 __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) #define __NR_getdents64 217 __SYSCALL(__NR_getdents64, sys_getdents64) #define __NR_set_tid_address 218 __SYSCALL(__NR_set_tid_address, sys_set_tid_address) #define __NR_restart_syscall 219 __SYSCALL(__NR_restart_syscall, sys_restart_syscall) #define __NR_semtimedop 220 __SYSCALL(__NR_semtimedop, sys_semtimedop) #define __NR_fadvise64 221 __SYSCALL(__NR_fadvise64, sys_fadvise64) #define __NR_timer_create 222 __SYSCALL(__NR_timer_create, sys_timer_create) #define __NR_timer_settime 223 __SYSCALL(__NR_timer_settime, sys_timer_settime) #define __NR_timer_gettime 224 __SYSCALL(__NR_timer_gettime, sys_timer_gettime) #define __NR_timer_getoverrun 225 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) #define __NR_timer_delete 226 __SYSCALL(__NR_timer_delete, sys_timer_delete) #define __NR_clock_settime 227 __SYSCALL(__NR_clock_settime, sys_clock_settime) #define __NR_clock_gettime 228 __SYSCALL(__NR_clock_gettime, sys_clock_gettime) #define __NR_clock_getres 229 __SYSCALL(__NR_clock_getres, sys_clock_getres) #define __NR_clock_nanosleep 230 __SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) #define __NR_exit_group 231 __SYSCALL(__NR_exit_group, sys_exit_group) #define __NR_epoll_wait 232 __SYSCALL(__NR_epoll_wait, sys_epoll_wait) #define __NR_epoll_ctl 233 __SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) #define __NR_tgkill 234 __SYSCALL(__NR_tgkill, sys_tgkill) #define __NR_utimes 235 __SYSCALL(__NR_utimes, sys_utimes) #define __NR_vserver 236 __SYSCALL(__NR_vserver, sys_ni_syscall) #define __NR_mbind 237 __SYSCALL(__NR_mbind, sys_mbind) #define __NR_set_mempolicy 238 __SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) #define __NR_get_mempolicy 239 __SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) #define __NR_mq_open 240 __SYSCALL(__NR_mq_open, sys_mq_open) #define __NR_mq_unlink 241 __SYSCALL(__NR_mq_unlink, sys_mq_unlink) #define __NR_mq_timedsend 242 __SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) #define __NR_mq_timedreceive 243 __SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) #define __NR_mq_notify 244 __SYSCALL(__NR_mq_notify, sys_mq_notify) #define __NR_mq_getsetattr 245 __SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) #define __NR_kexec_load 246 __SYSCALL(__NR_kexec_load, sys_kexec_load) #define __NR_waitid 247 __SYSCALL(__NR_waitid, sys_waitid) #define __NR_add_key 248 __SYSCALL(__NR_add_key, sys_add_key) #define __NR_request_key 249 __SYSCALL(__NR_request_key, sys_request_key) #define __NR_keyctl 250 __SYSCALL(__NR_keyctl, sys_keyctl) #define __NR_ioprio_set 251 __SYSCALL(__NR_ioprio_set, sys_ioprio_set) #define __NR_ioprio_get 252 __SYSCALL(__NR_ioprio_get, sys_ioprio_get) #define __NR_inotify_init 253 __SYSCALL(__NR_inotify_init, sys_inotify_init) #define __NR_inotify_add_watch 254 __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) #define __NR_inotify_rm_watch 255 __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) #define __NR_migrate_pages 256 
__SYSCALL(__NR_migrate_pages, sys_migrate_pages) #define __NR_openat 257 __SYSCALL(__NR_openat, sys_openat) #define __NR_mkdirat 258 __SYSCALL(__NR_mkdirat, sys_mkdirat) #define __NR_mknodat 259 __SYSCALL(__NR_mknodat, sys_mknodat) #define __NR_fchownat 260 __SYSCALL(__NR_fchownat, sys_fchownat) #define __NR_futimesat 261 __SYSCALL(__NR_futimesat, sys_futimesat) #define __NR_newfstatat 262 __SYSCALL(__NR_newfstatat, sys_newfstatat) #define __NR_unlinkat 263 __SYSCALL(__NR_unlinkat, sys_unlinkat) #define __NR_renameat 264 __SYSCALL(__NR_renameat, sys_renameat) #define __NR_linkat 265 __SYSCALL(__NR_linkat, sys_linkat) #define __NR_symlinkat 266 __SYSCALL(__NR_symlinkat, sys_symlinkat) #define __NR_readlinkat 267 __SYSCALL(__NR_readlinkat, sys_readlinkat) #define __NR_fchmodat 268 __SYSCALL(__NR_fchmodat, sys_fchmodat) #define __NR_faccessat 269 __SYSCALL(__NR_faccessat, sys_faccessat) #define __NR_pselect6 270 __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */ #define __NR_ppoll 271 __SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ #define __NR_unshare 272 __SYSCALL(__NR_unshare, sys_unshare) #define __NR_syscall_max __NR_unshare #ifndef __NO_STUBS /* user-visible error numbers are in the range -1 - -4095 */ #define __syscall_clobber "r11","rcx","memory" #define __syscall_return(type, res) \ do { \ if ((unsigned long)(res) >= (unsigned long)(-127)) { \ errno = -(res); \ res = -1; \ } \ return (type) (res); \ } while (0) #ifdef __KERNEL__ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_TIME #endif #ifndef __KERNEL_SYSCALLS__ #define __syscall "syscall" #define _syscall0(type,name) \ type name(void) \ { \ long __res; \ __asm__ volatile (__syscall \ : "=a" (__res) \ : "0" (__NR_##name) : __syscall_clobber ); \ __syscall_return(type,__res); \ } #define _syscall1(type,name,type1,arg1) \ type name(type1 arg1) \ { \ long __res; \ __asm__ volatile (__syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \ __syscall_return(type,__res); \ } #define _syscall2(type,name,type1,arg1,type2,arg2) \ type name(type1 arg1,type2 arg2) \ { \ long __res; \ __asm__ volatile (__syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \ __syscall_return(type,__res); \ } #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ type name(type1 arg1,type2 arg2,type3 arg3) \ { \ long __res; \ __asm__ volatile (__syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ "d" ((long)(arg3)) : __syscall_clobber); \ __syscall_return(type,__res); \ } #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ { \ long __res; \ __asm__ volatile ("movq %5,%%r10 ;" __syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ "d" ((long)(arg3)),"g" ((long)(arg4)) : 
__syscall_clobber,"r10" ); \ __syscall_return(type,__res); \ } #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ { \ long __res; \ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \ __syscall_clobber,"r8","r10" ); \ __syscall_return(type,__res); \ } #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ { \ long __res; \ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \ : "=a" (__res) \ : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \ "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \ "g" ((long)(arg6)) : \ __syscall_clobber,"r8","r10","r9" ); \ __syscall_return(type,__res); \ } #else /* __KERNEL_SYSCALLS__ */ #include #include /* * we need this inline - forking from kernel space will result * in NO COPY ON WRITE (!!!), until an execve is executed. This * is no problem, but for the stack. This is handled by not letting * main() use the stack at all after fork(). Thus, no function * calls - which means inline code for fork too, as otherwise we * would use the stack upon exit from 'fork()'. * * Actually only pause and fork are needed inline, so that there * won't be any messing with the stack from main(), but we define * some others too. */ #define __NR__exit __NR_exit static inline pid_t setsid(void) { return sys_setsid(); } static inline ssize_t write(unsigned int fd, char * buf, size_t count) { return sys_write(fd, buf, count); } static inline ssize_t read(unsigned int fd, char * buf, size_t count) { return sys_read(fd, buf, count); } static inline off_t lseek(unsigned int fd, off_t offset, unsigned int origin) { return sys_lseek(fd, offset, origin); } static inline long dup(unsigned int fd) { return sys_dup(fd); } /* implemented in asm in arch/x86_64/kernel/entry.S */ extern int execve(const char *, char * const *, char * const *); static inline long open(const char * filename, int flags, int mode) { return sys_open(filename, flags, mode); } static inline long close(unsigned int fd) { return sys_close(fd); } static inline pid_t waitpid(int pid, int * wait_stat, int flags) { return sys_wait4(pid, wait_stat, flags, NULL); } extern long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off); extern int sys_modify_ldt(int func, void *ptr, unsigned long bytecount); asmlinkage long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs); asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void *parent_tid, void *child_tid, struct pt_regs regs); asmlinkage long sys_fork(struct pt_regs regs); asmlinkage long sys_vfork(struct pt_regs regs); asmlinkage long sys_pipe(int *fildes); #endif /* __KERNEL_SYSCALLS__ */ #if !defined(__ASSEMBLY__) && defined(__KERNEL__) #include #include #include #include asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs); asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on); struct sigaction; asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, struct sigaction __user *oact, size_t sigsetsize); #endif /* __ASSEMBLY__ */ #endif /* __NO_STUBS */ /* * "Conditional" syscalls * 
* What we want is __attribute__((weak,alias("sys_ni_syscall"))), * but it doesn't work on all toolchains, so we just do it by hand */ #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/user.h000066400000000000000000000116121314037446600242550ustar00rootroot00000000000000#ifndef _X86_64_USER_H #define _X86_64_USER_H #include #include /* Core file format: The core file is written in such a way that gdb can understand it and provide useful information to the user. There are quite a number of obstacles to being able to view the contents of the floating point registers, and until these are solved you will not be able to view the contents of them. Actually, you can read in the core file and look at the contents of the user struct to find out what the floating point registers contain. The actual file contents are as follows: UPAGE: 1 page consisting of a user struct that tells gdb what is present in the file. Directly after this is a copy of the task_struct, which is currently not used by gdb, but it may come in useful at some point. All of the registers are stored as part of the upage. The upage should always be only one page. DATA: The data area is stored. We use current->end_text to current->brk to pick up all of the user variables, plus any memory that may have been malloced. No attempt is made to determine if a page is demand-zero or if a page is totally unused, we just cover the entire range. All of the addresses are rounded in such a way that an integral number of pages is written. STACK: We need the stack information in order to get a meaningful backtrace. We need to write the data from (esp) to current->start_stack, so we round each of these off in order to be able to write an integer number of pages. The minimum core file size is 3 pages, or 12288 bytes. */ /* * Pentium III FXSR, SSE support * Gareth Hughes , May 2000 * * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for * interacting with the FXSR-format floating point environment. Floating * point data can be accessed in the regular format in the usual manner, * and both the standard and SIMD floating point data can be accessed via * the new ptrace requests. In either case, changes to the FPU environment * will be reflected in the task's state as expected. * * x86-64 support by Andi Kleen. */ /* This matches the 64bit FXSAVE format as defined by AMD. It is the same as the 32bit format defined by Intel, except that the selector:offset pairs for data and eip are replaced with flat 64bit pointers. */ struct user_i387_struct { unsigned short cwd; unsigned short swd; unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ unsigned short fop; __u64 rip; __u64 rdp; __u32 mxcsr; __u32 mxcsr_mask; __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ __u32 padding[24]; }; /* * Segment register layout in coredumps. */ struct user_regs_struct { unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; unsigned long rip,cs,eflags; unsigned long rsp,ss; unsigned long fs_base, gs_base; unsigned long ds,es,fs,gs; }; /* When the kernel dumps core, it starts by dumping the user struct - this will be used by gdb to figure out where the data and stack segments are within the file, and what virtual addresses to use. 
*/ struct user{ /* We start with the registers, to mimic the way that "memory" is returned from the ptrace(3,...) function. */ struct user_regs_struct regs; /* Where the registers are actually stored */ /* ptrace does not yet supply these. Someday.... */ int u_fpvalid; /* True if math co-processor being used. */ /* for this mess. Not yet used. */ int pad0; struct user_i387_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ unsigned long int u_tsize; /* Text segment size (pages). */ unsigned long int u_dsize; /* Data segment size (pages). */ unsigned long int u_ssize; /* Stack segment size (pages). */ unsigned long start_code; /* Starting virtual address of text. */ unsigned long start_stack; /* Starting virtual address of stack area. This is actually the bottom of the stack, the top of the stack is always found in the esp register. */ long int signal; /* Signal that caused the core dump. */ int reserved; /* No longer used */ int pad1; struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ /* the registers. */ struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ unsigned long magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ unsigned long u_debugreg[8]; unsigned long error_code; /* CPU error code or 0 */ unsigned long fault_address; /* CR3 or 0 */ }; #define NBPG PAGE_SIZE #define UPAGES 1 #define HOST_TEXT_START_ADDR (u.start_code) #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) #endif /* _X86_64_USER_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/user32.h000066400000000000000000000040671314037446600244300ustar00rootroot00000000000000#ifndef USER32_H #define USER32_H 1 /* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */ struct user_i387_ia32_struct { u32 cwd; u32 swd; u32 twd; u32 fip; u32 fcs; u32 foo; u32 fos; u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ }; /* FSAVE frame with extensions */ struct user32_fxsr_struct { unsigned short cwd; unsigned short swd; unsigned short twd; /* not compatible to 64bit twd */ unsigned short fop; int fip; int fcs; int foo; int fos; int mxcsr; int reserved; int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ int padding[56]; }; struct user_regs_struct32 { __u32 ebx, ecx, edx, esi, edi, ebp, eax; unsigned short ds, __ds, es, __es; unsigned short fs, __fs, gs, __gs; __u32 orig_eax, eip; unsigned short cs, __cs; __u32 eflags, esp; unsigned short ss, __ss; }; struct user32 { struct user_regs_struct32 regs; /* Where the registers are actually stored */ int u_fpvalid; /* True if math co-processor being used. */ /* for this mess. Not yet used. */ struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ /* The rest of this junk is to help gdb figure out what goes where */ __u32 u_tsize; /* Text segment size (pages). */ __u32 u_dsize; /* Data segment size (pages). */ __u32 u_ssize; /* Stack segment size (pages). */ __u32 start_code; /* Starting virtual address of text. */ __u32 start_stack; /* Starting virtual address of stack area. This is actually the bottom of the stack, the top of the stack is always found in the esp register. */ __u32 signal; /* Signal that caused the core dump. */ int reserved; /* No __u32er used */ __u32 u_ar0; /* Used by gdb to help find the values for */ /* the registers. 
*/ __u32 u_fpstate; /* Math Co-processor pointer. */ __u32 magic; /* To uniquely identify a core file */ char u_comm[32]; /* User command that was responsible */ int u_debugreg[8]; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/vga.h000066400000000000000000000005711314037446600240560ustar00rootroot00000000000000/* * Access to VGA videoram * * (c) 1998 Martin Mares */ #ifndef _LINUX_ASM_VGA_H_ #define _LINUX_ASM_VGA_H_ /* * On the PC, we can just recalculate addresses and then * access the videoram directly without any black magic. */ #define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x) #define vga_readb(x) (*(x)) #define vga_writeb(x,y) (*(y) = (x)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/vmware_backdoor.h000066400000000000000000000046741314037446600264560ustar00rootroot00000000000000/* * Copyright (C) 2007, VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #ifndef _VMWARE_BACKDOOR_H #define _VMWARE_BACKDOOR_H #include #define VMWARE_BDOOR_MAGIC 0x564D5868 #define VMWARE_BDOOR_PORT 0x5658 #define VMWARE_BDOOR_CMD_GETVERSION 10 #define VMWARE_BDOOR_CMD_GETHZ 45 #define VMWARE_BDOOR_CMD_LAZYTIMEREMULATION 49 #define VMWARE_BDOOR(cmd, eax, ebx, ecx, edx) \ __asm__("inl (%%dx)" : \ "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \ "0"(VMWARE_BDOOR_MAGIC), "1"(VMWARE_BDOOR_CMD_##cmd), \ "2"(VMWARE_BDOOR_PORT), "3"(0) : \ "memory"); static inline int vmware_platform(void) { uint32_t eax, ebx, ecx, edx; VMWARE_BDOOR(GETVERSION, eax, ebx, ecx, edx); return eax != (uint32_t)-1 && ebx == VMWARE_BDOOR_MAGIC; } static inline unsigned int vmware_get_tsc_khz(void) { uint64_t tsc_hz; uint32_t eax, ebx, ecx, edx; VMWARE_BDOOR(GETHZ, eax, ebx, ecx, edx); if (eax == (uint32_t)-1) return 0; tsc_hz = eax | (((uint64_t)ebx) << 32); BUG_ON((tsc_hz / 1000) >> 32); return tsc_hz / 1000; } /* * Usually, all timer devices in the VM are kept consistent, but this * can cause time in the VM to fall behind (e.g., if the VM is * descheduled for long periods of time, the hypervisor may not have had * an opportunity to deliver timer interrupts, and so the time in the VM * will be behind until it can catch up. In lazy timer emulation mode, * the TSC tracks real time regardless of whether e.g. timer interrupts * are delivered on time. 
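 *
 * A minimal usage sketch (illustrative, not part of this header): boot-time
 * guest code could combine the helpers in this file along the lines of
 *
 *	if (vmware_platform()) {
 *		unsigned int tsc_khz = vmware_get_tsc_khz();
 *
 *		if (tsc_khz && vmware_enable_lazy_timer_emulation())
 *			printk(KERN_INFO "VMware: lazy timer emulation enabled, "
 *			       "TSC runs at %u kHz\n", tsc_khz);
 *	}
 *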
*/ static inline int vmware_enable_lazy_timer_emulation(void) { uint32_t eax, ebx, ecx, edx; VMWARE_BDOOR(LAZYTIMEREMULATION, eax, ebx, ecx, edx); return ebx == VMWARE_BDOOR_MAGIC; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/vsyscall.h000066400000000000000000000036411314037446600251420ustar00rootroot00000000000000#ifndef _ASM_X86_64_VSYSCALL_H_ #define _ASM_X86_64_VSYSCALL_H_ #include enum vsyscall_num { __NR_vgettimeofday, __NR_vtime, }; #define VSYSCALL_START (-10UL << 20) #define VSYSCALL_SIZE 1024 #define VSYSCALL_END (-2UL << 20) #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) #ifdef __KERNEL__ #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16))) #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16))) #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) #define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16))) #define __section_sysctl_vsyscall __attribute__ ((unused, __section__ (".sysctl_vsyscall"), aligned(16))) #define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16))) #define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(16))) #define VXTIME_TSC 1 #define VXTIME_HPET 2 #define VXTIME_PMTMR 3 struct vxtime_data { long hpet_address; /* HPET base address */ int last; unsigned long last_tsc; long quot; long tsc_quot; int mode; }; #define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a) #define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a) /* vsyscall space (readonly) */ extern struct vxtime_data __vxtime; extern struct timespec __xtime; extern volatile unsigned long __jiffies; extern unsigned long __wall_jiffies; extern struct timezone __sys_tz; extern seqlock_t __xtime_lock; /* kernel space (writeable) */ extern struct vxtime_data vxtime; extern unsigned long wall_jiffies; extern struct timezone sys_tz; extern int sysctl_vsyscall; extern seqlock_t xtime_lock; extern int sysctl_vsyscall; #define ARCH_HAVE_XTIME_LOCK 1 #endif /* __KERNEL__ */ #endif /* _ASM_X86_64_VSYSCALL_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/vsyscall32.h000066400000000000000000000012361314037446600253050ustar00rootroot00000000000000#ifndef _ASM_VSYSCALL32_H #define _ASM_VSYSCALL32_H 1 /* Values need to match arch/x86_64/ia32/vsyscall.lds */ #ifdef __ASSEMBLY__ #define VSYSCALL32_BASE 0xffffe000 #define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410) #else #define VSYSCALL32_BASE 0xffffe000UL #define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE) #define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE) #define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400) #define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410) #define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500) #define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm-x86_64/xor.h000066400000000000000000000175211314037446600241140ustar00rootroot00000000000000/* * include/asm-x86_64/xor.h * * Optimized RAID-5 checksumming functions for MMX and SSE. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. 
* * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Cache avoiding checksumming functions utilizing KNI instructions * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ /* * Based on * High-speed RAID5 checksumming functions utilizing SSE instructions. * Copyright (C) 1998 Ingo Molnar. */ /* * x86-64 changes / gcc fixes from Andi Kleen. * Copyright 2002 Andi Kleen, SuSE Labs. * * This hasn't been optimized for the hammer yet, but there are likely * no advantages to be gotten from x86-64 here anyways. */ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; /* Doesn't use gcc to save the XMM registers, because there is no easy way to tell it to do a clts before the register saving. */ #define XMMS_SAVE do { \ preempt_disable(); \ asm volatile ( \ "movq %%cr0,%0 ;\n\t" \ "clts ;\n\t" \ "movups %%xmm0,(%1) ;\n\t" \ "movups %%xmm1,0x10(%1) ;\n\t" \ "movups %%xmm2,0x20(%1) ;\n\t" \ "movups %%xmm3,0x30(%1) ;\n\t" \ : "=&r" (cr0) \ : "r" (xmm_save) \ : "memory"); \ } while(0) #define XMMS_RESTORE do { \ asm volatile ( \ "sfence ;\n\t" \ "movups (%1),%%xmm0 ;\n\t" \ "movups 0x10(%1),%%xmm1 ;\n\t" \ "movups 0x20(%1),%%xmm2 ;\n\t" \ "movups 0x30(%1),%%xmm3 ;\n\t" \ "movq %0,%%cr0 ;\n\t" \ : \ : "r" (cr0), "r" (xmm_save) \ : "memory"); \ preempt_enable(); \ } while(0) #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" #define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" #define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" #define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" #define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" #define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" #define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned int lines = bytes >> 8; unsigned long cr0; xmm_store_t xmm_save[4]; XMMS_SAVE; asm volatile ( #undef BLOCK #define BLOCK(i) \ LD(i,0) \ LD(i+1,1) \ PF1(i) \ PF1(i+2) \ LD(i+2,2) \ LD(i+3,3) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " decl %[cnt] ; jnz 1b" : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static void xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ PF0(i+4) \ PF0(i+6) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] 
;\n" " addq %[inc], %[p3] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static void xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ PF0(i+4) \ PF0(i+6) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " addq %[inc], %[p3] ;\n" " addq %[inc], %[p4] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) : [inc] "r" (256UL) : "memory" ); XMMS_RESTORE; } static void xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned int lines = bytes >> 8; xmm_store_t xmm_save[4]; unsigned long cr0; XMMS_SAVE; __asm__ __volatile__ ( #undef BLOCK #define BLOCK(i) \ PF1(i) \ PF1(i+2) \ LD(i,0) \ LD(i+1,1) \ LD(i+2,2) \ LD(i+3,3) \ PF2(i) \ PF2(i+2) \ XO1(i,0) \ XO1(i+1,1) \ XO1(i+2,2) \ XO1(i+3,3) \ PF3(i) \ PF3(i+2) \ XO2(i,0) \ XO2(i+1,1) \ XO2(i+2,2) \ XO2(i+3,3) \ PF4(i) \ PF4(i+2) \ PF0(i+4) \ PF0(i+6) \ XO3(i,0) \ XO3(i+1,1) \ XO3(i+2,2) \ XO3(i+3,3) \ XO4(i,0) \ XO4(i+1,1) \ XO4(i+2,2) \ XO4(i+3,3) \ ST(i,0) \ ST(i+1,1) \ ST(i+2,2) \ ST(i+3,3) \ PF0(0) PF0(2) " .align 32 ;\n" " 1: ;\n" BLOCK(0) BLOCK(4) BLOCK(8) BLOCK(12) " addq %[inc], %[p1] ;\n" " addq %[inc], %[p2] ;\n" " addq %[inc], %[p3] ;\n" " addq %[inc], %[p4] ;\n" " addq %[inc], %[p5] ;\n" " decl %[cnt] ; jnz 1b" : [cnt] "+c" (lines), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) : [inc] "r" (256UL) : "memory"); XMMS_RESTORE; } static struct xor_block_template xor_block_sse = { .name = "generic_sse", .do_2 = xor_sse_2, .do_3 = xor_sse_3, .do_4 = xor_sse_4, .do_5 = xor_sse_5, }; #undef XOR_TRY_TEMPLATES #define XOR_TRY_TEMPLATES \ do { \ xor_speed(&xor_block_sse); \ } while (0) /* We force the use of the SSE xor block because it can write around L2. We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse) UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/000077500000000000000000000000001314037446600221715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/gnttab_dma.h000066400000000000000000000026471314037446600244530ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/hypercall.h000066400000000000000000000002451314037446600243260ustar00rootroot00000000000000#ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifdef CONFIG_X86_64 #include "hypercall_64.h" #else #include "hypercall_32.h" #endif #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/hypercall_32.h000066400000000000000000000246071314037446600246420ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_32_H__ #define __HYPERCALL_32_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int 
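/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the _hypercallN() macros above jump to the stub at
 * hypercall_page + nr*32 and pass up to five arguments in EBX, ECX,
 * EDX, ESI and EDI, with the result returned in EAX.  A wrapper for a
 * two-argument hypercall therefore reduces to a single macro
 * invocation, mirroring HYPERVISOR_xen_version() defined later in this
 * file:
 */
static inline int example_xen_version(int cmd, void *arg)
{
        return _hypercall2(int, xen_version, cmd, arg);
}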
__must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int __must_check 
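/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a typical caller of HYPERVISOR_memory_op() above is the
 * balloon driver, which asks Xen to back a list of PFNs with machine
 * frames.  The field names assume the Xen 3.x xen_memory_reservation
 * layout shipped with these interface headers; 'frame_list' and 'nr'
 * are hypothetical inputs.
 */
static inline int example_populate_frames(unsigned long *frame_list,
                                           unsigned long nr)
{
        struct xen_memory_reservation reservation = {
                .nr_extents   = nr,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

        set_xen_guest_handle(reservation.extent_start, frame_list);
        /* Returns the number of extents populated, or a negative errno. */
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}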
HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_32_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/hypercall_64.h000066400000000000000000000245461314037446600246510ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_64_H__ #define __HYPERCALL_64_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "add hypercall_stubs(%%rip),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ register long __arg5 asm("r8") = (long)(a5); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } 
static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry*/ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_acm_op( int cmd, void *arg) { return _hypercall2(int, acm_op, cmd, arg); } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int 
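/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): remapping one kernel virtual page onto a machine frame with
 * the HYPERVISOR_update_va_mapping() wrapper above.  pfn_pte_ma() is
 * provided by asm/maddr.h in this tree; 'va' and 'mfn' are hypothetical
 * inputs, and UVMF_INVLPG | UVMF_LOCAL asks Xen to flush just this
 * vCPU's TLB entry for the address.
 */
static inline int example_remap_page(unsigned long va, unsigned long mfn)
{
        return HYPERVISOR_update_va_mapping(va,
                                            pfn_pte_ma(mfn, PAGE_KERNEL),
                                            UVMF_INVLPG | UVMF_LOCAL);
}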
__must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_64_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/hypervisor.h000066400000000000000000000160101314037446600245520ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) # ifdef CONFIG_X86_PAE # include # else # include # endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) # include #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. 
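/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a driver that needs a machine-contiguous, 32-bit addressable
 * buffer can exchange an ordinary allocation with Xen using the helpers
 * declared above.  'vstart' is the buffer's kernel virtual address and
 * 'order' the log2 number of pages; both are hypothetical inputs here.
 */
static int example_make_dma_buffer(unsigned long vstart, unsigned int order)
{
        int rc = xen_create_contiguous_region(vstart, order, 32);
        if (rc)
                return rc;      /* Xen could not exchange the frames. */

        /* ... use the now machine-contiguous buffer for device DMA ... */

        xen_destroy_contiguous_region(vstart, order);
        return 0;
}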
*/ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/maddr.h000066400000000000000000000002161314037446600234300ustar00rootroot00000000000000#ifndef __MADDR_H__ #define __MADDR_H__ #ifdef CONFIG_X86_64 #include "maddr_64.h" #else #include "maddr_32.h" #endif #endif /* _MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/maddr_32.h000066400000000000000000000133331314037446600237400ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
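/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): batching two page-table updates into a single hypercall with
 * the MULTI_* helpers above.  HYPERVISOR_multicall() comes from
 * asm/hypercall.h; the virtual addresses and machine frames below are
 * hypothetical.  Per-entry results are left in mcl[i].result.
 */
static int example_batched_remap(unsigned long va0, unsigned long mfn0,
                                 unsigned long va1, unsigned long mfn1)
{
        multicall_entry_t mcl[2];

        MULTI_update_va_mapping(&mcl[0], va0,
                                pfn_pte_ma(mfn0, PAGE_KERNEL), 0);
        MULTI_update_va_mapping(&mcl[1], va1,
                                pfn_pte_ma(mfn1, PAGE_KERNEL),
                                UVMF_TLB_FLUSH | UVMF_LOCAL);

        /* One trap into Xen instead of two. */
        return HYPERVISOR_multicall(mcl, 2);
}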
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(max_mapnr && pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(max_mapnr && pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < max_mapnr) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(max_mapnr && pfn >= max_mapnr); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). 
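/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the p2m/m2p helpers above translate between the guest's
 * pseudo-physical frame numbers (PFNs) and Xen's machine frame numbers
 * (MFNs).  Typical uses are turning a lowmem buffer's physical address
 * into the machine address a backend or device needs, and checking
 * whether a machine frame really belongs to this guest.  __pa() and
 * pfn_valid() are the standard kernel helpers.
 */
static inline maddr_t example_buffer_machine_addr(void *buf)
{
        return phys_to_machine((paddr_t)__pa(buf));
}

static inline int example_frame_is_ours(unsigned long mfn)
{
        /* mfn_to_local_pfn() returns an out-of-range PFN for foreign frames. */
        return pfn_valid(mfn_to_local_pfn(mfn));
}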
On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #endif #ifdef CONFIG_X86_PAE #define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } ) static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot) { pte_t pte; pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ (pgprot_val(pgprot) >> 32); pte.pte_high &= (__supported_pte_mask >> 32); pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ __supported_pte_mask; return pte; } #else #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/maddr_64.h000066400000000000000000000114541314037446600237470ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. 
If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/asm/synch_bitops.h000066400000000000000000000072471314037446600250600ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } static __always_inline int synch_const_test_bit(int nr, const volatile void * addr) { return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; } static __inline__ int synch_var_test_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "btl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) ); return oldbit; } #define synch_test_bit(nr,addr) \ (__builtin_constant_p(nr) ? \ synch_const_test_bit((nr),(addr)) : \ synch_var_test_bit((nr),(addr))) #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/000077500000000000000000000000001314037446600222035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/balloon.h000066400000000000000000000050641314037446600240070ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
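/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): these synchronised bit operations are intended for bitfields
 * shared with Xen and other vCPUs, such as the event-channel mask in
 * the shared info page.  Masking a port and testing its previous state
 * could look like this; note the real unmask path in the event-channel
 * code additionally re-checks pending events with the hypervisor.
 */
static inline int example_mask_port(shared_info_t *s, int port)
{
        return synch_test_and_set_bit(port, s->evtchn_mask);
}

static inline void example_unmask_port(shared_info_t *s, int port)
{
        synch_clear_bit(port, s->evtchn_mask);
}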
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_BALLOON_H__ #define __ASM_BALLOON_H__ /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); /* Free an empty page range (not allocated through alloc_empty_pages_and_pagevec), adding to the balloon. */ void free_empty_pages(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif /* __ASM_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/blkif.h000066400000000000000000000070621314037446600234500ustar00rootroot00000000000000#ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? 
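/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a driver that needs the memory reservation to stay stable,
 * for example while rewriting p2m entries of pages it is about to hand
 * to a backend, brackets that region with the balloon_lock/unlock
 * macros declared above.
 */
static void example_reservation_critical_region(void)
{
        unsigned long flags;

        balloon_lock(flags);
        /* ... manipulate p2m entries / grant the pages ... */
        balloon_unlock(flags);
}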
*/ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; if (n > src->nr_segments) n = src->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; if (n > src->nr_segments) n = src->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/compat_ioctl.h000066400000000000000000000030411314037446600250270ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 
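/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a block backend copies each request into the native layout
 * before processing it, using the conversion helpers above and the
 * protocol negotiated with the frontend.  The parameters are
 * hypothetical; RING_GET_REQUEST() comes from xen/interface/io/ring.h.
 */
static void example_copy_request(enum blkif_protocol protocol,
                                 blkif_back_rings_t *rings,
                                 RING_IDX rc, blkif_request_t *req)
{
        switch (protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(req, RING_GET_REQUEST(&rings->native, rc), sizeof(*req));
                break;
        case BLKIF_PROTOCOL_X86_32:
                blkif_get_x86_32_req(req, RING_GET_REQUEST(&rings->x86_32, rc));
                break;
        case BLKIF_PROTOCOL_X86_64:
                blkif_get_x86_64_req(req, RING_GET_REQUEST(&rings->x86_64, rc));
                break;
        }
}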
2007 * * Authors: Jimi Xenidis * Hollis Blanchard */ #ifndef __LINUX_XEN_COMPAT_H__ #define __LINUX_XEN_COMPAT_H__ #include extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg); struct privcmd_mmap_32 { int num; domid_t dom; compat_uptr_t entry; }; struct privcmd_mmapbatch_32 { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ compat_uptr_t arr; /* array of mfns - top nibble set on err */ }; #define IOCTL_PRIVCMD_MMAP_32 \ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32)) #define IOCTL_PRIVCMD_MMAPBATCH_32 \ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32)) #endif /* __LINUX_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/cpu_hotplug.h000066400000000000000000000014751314037446600247140ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/driver_util.h000066400000000000000000000005511314037446600247050ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(unsigned long size); extern void free_vm_area(struct vm_struct *area); extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/evtchn.h000066400000000000000000000107741314037446600236540ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. */ int bind_caller_port_to_irqhandler( unsigned int caller_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/features.h000066400000000000000000000007041314037446600241730ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. 
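/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a typical frontend binds the event channel advertised by its
 * backend and gets back a Linux-style IRQ handle.  The handler
 * signature matches the 2.6.16-era prototype used above;
 * 'otherend_id', 'remote_port' and the private 'info' pointer are
 * hypothetical.
 */
static irqreturn_t example_interrupt(int irq, void *dev_id,
                                     struct pt_regs *regs)
{
        /* ... consume ring responses for the device behind dev_id ... */
        return IRQ_HANDLED;
}

static int example_bind(unsigned int otherend_id, unsigned int remote_port,
                        void *info)
{
        int irq = bind_interdomain_evtchn_to_irqhandler(otherend_id,
                                                        remote_port,
                                                        example_interrupt,
                                                        0,
                                                        "example-frontend",
                                                        info);
        if (irq < 0)
                return irq;

        /* Kick the backend once the frontend is ready. */
        notify_remote_via_irq(irq);
        return irq;
}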
* * Copyright (c) 2006, Ian Campbell */ #ifndef __ASM_XEN_FEATURES_H__ #define __ASM_XEN_FEATURES_H__ #include extern void setup_xen_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; #define xen_feature(flag) (xen_features[flag]) #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/firmware.h000066400000000000000000000003011314037446600241620ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) void copy_edd(void); #endif void copy_edid(void); #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/gnttab.h000066400000000000000000000124221314037446600236340ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
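/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the feature flags are read once at boot by
 * setup_xen_features() and queried afterwards with xen_feature().
 * Code that manipulates the p2m table directly has to skip that work
 * on auto-translated guests, for example:
 */
static inline int example_needs_p2m_update(void)
{
        return !xen_feature(XENFEAT_auto_translated_physmap);
}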
*/ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); int gnttab_suspend(void); int gnttab_resume(void); void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/hvm.h000066400000000000000000000007101314037446600231440ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/hypercall.h000066400000000000000000000013751314037446600243450ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; 
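/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the classic frontend pattern for sharing one page with a
 * backend domain using the grant-table primitives above.
 * 'otherend_id' and 'page_addr' are hypothetical; a non-zero flags
 * value (e.g. GTF_readonly from xen/interface/grant_table.h, assuming
 * that is what the 'flags' parameter accepts here) would restrict the
 * backend to read-only mappings.
 */
static int example_share_page(domid_t otherend_id, unsigned long page_addr)
{
        int ref = gnttab_grant_foreign_access(otherend_id,
                                              virt_to_mfn((void *)page_addr),
                                              0);
        if (ref < 0)
                return ref;

        /* ... advertise 'ref' to the backend via xenstore, run the device ... */

        /* Revoke the grant; the page is freed once the grant is unused. */
        gnttab_end_foreign_access(ref, page_addr);
        return 0;
}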
BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/hypervisor_sysfs.h000066400000000000000000000015051314037446600260160ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/000077500000000000000000000000001314037446600241435ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/COPYING000066400000000000000000000032641314037446600252030ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
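/*
 * Illustrative sketch only, not part of the original tree: one plausible way
 * a backend driver might use the gnttab_set_map_op() helper from gnttab.h
 * above to map a page granted by a frontend domain.  GNTTABOP_map_grant_ref,
 * GNTMAP_host_map, GNTST_okay, HYPERVISOR_grant_table_op() and the status
 * and handle fields are assumed to come from the grant-table interface
 * headers, which are not shown in this excerpt; vaddr, gref and otherend_id
 * are placeholder variables.
 *
 *	struct gnttab_map_grant_ref op;
 *	grant_handle_t handle;
 *
 *	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
 *			  gref, otherend_id);
 *	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ||
 *	    op.status != GNTST_okay)
 *		return -EINVAL;		// mapping failed
 *	handle = op.handle;		// keep for the later unmap operation
 */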
UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/acm.h000066400000000000000000000156051314037446600250630ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "xen.h" /* if ACM_DEBUG defined, all hooks should * print a short trace message (comment it out * when not in testing mode ) */ /* #define ACM_DEBUG */ #ifdef ACM_DEBUG # define printkd(fmt, args...) printk(fmt,## args) #else # define printkd(fmt, args...) #endif /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? 
"CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 3 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. */ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. 
the java tool) */ struct acm_policy_buffer { uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t magic; uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/acm_ops.h000066400000000000000000000104711314037446600257400ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). 
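 * For example, if one of the structures below ever gained an extra field,
 * bumping ACM_INTERFACE_VERSION lets the hypervisor reject a stale tool's
 * xen_acmctl.interface_version outright instead of silently misreading the
 * reshaped argument union.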
*/ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/000077500000000000000000000000001314037446600255035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/cpuid.h000066400000000000000000000050721314037446600267640ustar00rootroot00000000000000/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/hvm/000077500000000000000000000000001314037446600262755ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/hvm/save.h000066400000000000000000000243761314037446600274200ustar00rootroot00000000000000/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. 
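 * A sketch (not taken from this tree) of how a guest might use the CPUID
 * leaves declared in cpuid.h above to detect that it is running on Xen; the
 * inline-asm form and variable names are illustrative assumptions:
 *
 *	uint32_t eax, ebx, ecx, edx;
 *	asm volatile("cpuid"
 *		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *		     : "0" (XEN_CPUID_FIRST_LEAF));
 *	if (ebx == XEN_CPUID_SIGNATURE_EBX &&
 *	    ecx == XEN_CPUID_SIGNATURE_ECX &&
 *	    edx == XEN_CPUID_SIGNATURE_EDX)
 *		running_on_xen = 1;	// leaf 0x40000001 then reports the version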
*/ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint32_t sysenter_cs; uint32_t padding0; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. */ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. 
*/ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { /* A 4k page of register state */ uint8_t data[0x400]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { DECLARE_BITMAP(i, 32*4); uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { DECLARE_BITMAP(i, 16); uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. */ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * 
MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 14 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/xen-x86_32.h000066400000000000000000000150311314037446600273750ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
*/ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #ifdef CONFIG_X86_PAE #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #else #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_NONPAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_NONPAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_NONPAE #endif #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. 
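 * (The accessor macros below simply rotate the 32-bit value left by 12 bits,
 * so the high-order MFN bits that describe frames above the 4GB boundary end
 * up in the low, otherwise-unused bits of the %cr3 image instead of being
 * truncated away.)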
* When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/xen-x86_64.h000066400000000000000000000156711314037446600274140ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
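 * (In 64-bit mode the processor ignores the base and limit of the data
 * segment registers, which is why FLAT_RING3_DS64 below can simply be the
 * NULL selector while the 32-bit compatibility selectors remain real GDT
 * entries.)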
*/ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif #ifndef __ASSEMBLY__ /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. * All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #ifdef __GNUC__ /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). 
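 * (Under gcc, __DECL_REG(ax) from the branch above expands to an anonymous
 * union in which rax and eax are both 64-bit names for the same slot and
 * _eax overlays its low 32 bits, so 32-bit and 64-bit guest code can share
 * one cpu_user_regs layout.)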
*/ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86/xen.h000066400000000000000000000167371314037446600264640ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. 
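 * (With the newer interface version, DEFINE_XEN_GUEST_HANDLE(foo_t) below
 * wraps the raw foo_t pointer in a one-member struct, which keeps handle
 * arguments type-checked while remaining layout-compatible with passing the
 * bare pointer.)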
*/ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86_32.h000066400000000000000000000024131314037446600261600ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
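 * (As an aside on the XEN_EMULATE_PREFIX defined just above: the byte
 * sequence 0x0f,0x0b,0x78,0x65,0x6e is a ud2 opcode followed by the ASCII
 * string "xen", which is how a PV guest can execute XEN_CPUID and have the
 * hypervisor emulate the otherwise non-trapping cpuid instruction.)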
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/arch-x86_64.h000066400000000000000000000024131314037446600261650ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/callback.h000066400000000000000000000076671314037446600260700ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/dom0_ops.h000066400000000000000000000077061314037446600260460ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. 
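 * A sketch (not taken from this tree) of how an x86-64 guest might use the
 * callback.h definitions above to register its event upcall; the
 * HYPERVISOR_callback_op() wrapper and the handler symbol are assumptions:
 *
 *	struct callback_register event = {
 *		.type    = CALLBACKTYPE_event,
 *		.flags   = 0,
 *		.address = (xen_callback_t)hypervisor_event_upcall,
 *	};
 *	if (HYPERVISOR_callback_op(CALLBACKOP_register, &event))
 *		panic("cannot register Xen event callback");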
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/domctl.h000066400000000000000000000525421314037446600256060ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000005 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ #define XEN_DOMCTL_createdomain 1 struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) uint32_t flags; }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_getdomaininfo 5 struct xen_domctl_getdomaininfo { /* OUT variables. 
*/ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* CPU to which this domain is bound. */ #define XEN_DOMINF_cpumask 255 #define XEN_DOMINF_cpushift 8 /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); #define XEN_DOMCTL_getmemlist 6 struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); #define XEN_DOMCTL_getpageframeinfo2 8 struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ #define XEN_DOMCTL_shadow_op 10 /* Disable shadow mode. */ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. 
*/ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); #define XEN_DOMCTL_max_mem 11 struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); #define XEN_DOMCTL_getvcpuinfo 14 struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? */ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. 
*/ #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_getvcpuaffinity 25 struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); #define XEN_DOMCTL_max_vcpus 15 struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); #define XEN_DOMCTL_scheduler_op 16 /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); #define XEN_DOMCTL_setdomainhandle 17 struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); #define XEN_DOMCTL_setdebugging 18 struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); #define XEN_DOMCTL_irq_permission 19 struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); #define XEN_DOMCTL_iomem_permission 20 struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); #define XEN_DOMCTL_ioport_permission 21 struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? */ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); #define XEN_DOMCTL_hypercall_init 22 struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); #define XEN_DOMCTL_arch_setup 23 #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. 
*/ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); #define XEN_DOMCTL_settimeoffset 24 struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); #define XEN_DOMCTL_real_mode_area 26 struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. */ #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_test_assign_device 45 struct xen_domctl_assign_device { uint32_t machine_bdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Pass-through interrupts: bind real irq -> hvm devfn. */ #define XEN_DOMCTL_bind_pt_irq 38 typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. */ #define XEN_DOMCTL_memory_mapping 39 #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. 
*/ #define XEN_DOMCTL_ioport_mapping 40 struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ #define XEN_DOMCTL_pin_mem_cacheattr 41 /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ #define XEN_DOMCTL_set_opt_feature 44 struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! 
*/ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); struct xen_domctl { uint32_t cmd; uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/elfnote.h000066400000000000000000000155261314037446600257610ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. 
* * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL /* * System information exported through crash notes. 
* * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/elfstructs.h000066400000000000000000000463261314037446600265250ustar00rootroot00000000000000#ifndef __XEN_PUBLIC_ELFSTRUCTS_H__ #define __XEN_PUBLIC_ELFSTRUCTS_H__ 1 /* * Copyright (c) 1995, 1996 Erik Theisen. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ typedef uint8_t Elf_Byte; typedef uint32_t Elf32_Addr; /* Unsigned program address */ typedef uint32_t Elf32_Off; /* Unsigned file offset */ typedef int32_t Elf32_Sword; /* Signed large integer */ typedef uint32_t Elf32_Word; /* Unsigned large integer */ typedef uint16_t Elf32_Half; /* Unsigned medium integer */ typedef uint64_t Elf64_Addr; typedef uint64_t Elf64_Off; typedef int32_t Elf64_Shalf; typedef int32_t Elf64_Sword; typedef uint32_t Elf64_Word; typedef int64_t Elf64_Sxword; typedef uint64_t Elf64_Xword; typedef uint32_t Elf64_Half; typedef uint16_t Elf64_Quarter; /* * e_ident[] identification indexes * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html */ #define EI_MAG0 0 /* file ID */ #define EI_MAG1 1 /* file ID */ #define EI_MAG2 2 /* file ID */ #define EI_MAG3 3 /* file ID */ #define EI_CLASS 4 /* file class */ #define EI_DATA 5 /* data encoding */ #define EI_VERSION 6 /* ELF header version */ #define EI_OSABI 7 /* OS/ABI ID */ #define EI_ABIVERSION 8 /* ABI version */ #define EI_PAD 9 /* start of pad bytes */ #define EI_NIDENT 16 /* Size of e_ident[] */ /* e_ident[] magic number */ #define ELFMAG0 0x7f /* e_ident[EI_MAG0] */ #define ELFMAG1 'E' /* e_ident[EI_MAG1] */ #define ELFMAG2 'L' /* e_ident[EI_MAG2] */ #define ELFMAG3 'F' /* e_ident[EI_MAG3] */ #define ELFMAG "\177ELF" /* magic */ #define SELFMAG 4 /* size of magic */ /* e_ident[] file class */ #define ELFCLASSNONE 0 /* invalid */ #define ELFCLASS32 1 /* 32-bit objs */ #define ELFCLASS64 2 /* 64-bit objs */ #define ELFCLASSNUM 3 /* number of classes */ /* e_ident[] data encoding */ #define ELFDATANONE 0 /* invalid */ #define ELFDATA2LSB 1 /* Little-Endian */ #define ELFDATA2MSB 2 /* Big-Endian */ #define ELFDATANUM 3 /* number of data encode defines */ /* e_ident[] Operating System/ABI */ #define ELFOSABI_SYSV 0 /* UNIX System V ABI */ #define ELFOSABI_HPUX 1 /* HP-UX operating system */ #define ELFOSABI_NETBSD 2 /* NetBSD */ #define ELFOSABI_LINUX 3 /* GNU/Linux */ #define ELFOSABI_HURD 4 /* GNU/Hurd */ #define ELFOSABI_86OPEN 5 /* 86Open common IA32 ABI */ #define ELFOSABI_SOLARIS 6 /* Solaris */ #define ELFOSABI_MONTEREY 7 /* Monterey */ #define ELFOSABI_IRIX 8 /* IRIX */ #define ELFOSABI_FREEBSD 9 /* FreeBSD */ #define ELFOSABI_TRU64 10 /* TRU64 UNIX */ #define ELFOSABI_MODESTO 11 /* Novell Modesto */ #define ELFOSABI_OPENBSD 12 /* OpenBSD */ #define ELFOSABI_ARM 97 /* ARM */ #define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */ /* e_ident */ #define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \ (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \ (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \ (ehdr).e_ident[EI_MAG3] == ELFMAG3) /* ELF Header */ typedef struct elfhdr { unsigned char e_ident[EI_NIDENT]; /* ELF Identification */ Elf32_Half e_type; /* object file type */ Elf32_Half e_machine; /* machine */ Elf32_Word e_version; /* object file version */ Elf32_Addr e_entry; /* virtual entry point */ Elf32_Off e_phoff; /* program header table offset */ Elf32_Off e_shoff; /* section header table offset */ Elf32_Word e_flags; /* processor-specific flags */ Elf32_Half e_ehsize; /* ELF header size */ Elf32_Half e_phentsize; /* program header entry size */ Elf32_Half e_phnum; /* number of program header entries */ Elf32_Half e_shentsize; /* section header entry size */ Elf32_Half e_shnum; /* number of section header entries */ Elf32_Half e_shstrndx; /* section header table's "section header string table" entry offset */ } Elf32_Ehdr; typedef struct { unsigned char e_ident[EI_NIDENT]; /* Id bytes 
*/ Elf64_Quarter e_type; /* file type */ Elf64_Quarter e_machine; /* machine type */ Elf64_Half e_version; /* version number */ Elf64_Addr e_entry; /* entry point */ Elf64_Off e_phoff; /* Program hdr offset */ Elf64_Off e_shoff; /* Section hdr offset */ Elf64_Half e_flags; /* Processor flags */ Elf64_Quarter e_ehsize; /* sizeof ehdr */ Elf64_Quarter e_phentsize; /* Program header entry size */ Elf64_Quarter e_phnum; /* Number of program headers */ Elf64_Quarter e_shentsize; /* Section header entry size */ Elf64_Quarter e_shnum; /* Number of section headers */ Elf64_Quarter e_shstrndx; /* String table index */ } Elf64_Ehdr; /* e_type */ #define ET_NONE 0 /* No file type */ #define ET_REL 1 /* relocatable file */ #define ET_EXEC 2 /* executable file */ #define ET_DYN 3 /* shared object file */ #define ET_CORE 4 /* core file */ #define ET_NUM 5 /* number of types */ #define ET_LOPROC 0xff00 /* reserved range for processor */ #define ET_HIPROC 0xffff /* specific e_type */ /* e_machine */ #define EM_NONE 0 /* No Machine */ #define EM_M32 1 /* AT&T WE 32100 */ #define EM_SPARC 2 /* SPARC */ #define EM_386 3 /* Intel 80386 */ #define EM_68K 4 /* Motorola 68000 */ #define EM_88K 5 /* Motorola 88000 */ #define EM_486 6 /* Intel 80486 - unused? */ #define EM_860 7 /* Intel 80860 */ #define EM_MIPS 8 /* MIPS R3000 Big-Endian only */ /* * Don't know if EM_MIPS_RS4_BE, * EM_SPARC64, EM_PARISC, * or EM_PPC are ABI compliant */ #define EM_MIPS_RS4_BE 10 /* MIPS R4000 Big-Endian */ #define EM_SPARC64 11 /* SPARC v9 64-bit unoffical */ #define EM_PARISC 15 /* HPPA */ #define EM_SPARC32PLUS 18 /* Enhanced instruction set SPARC */ #define EM_PPC 20 /* PowerPC */ #define EM_PPC64 21 /* PowerPC 64-bit */ #define EM_ARM 40 /* Advanced RISC Machines ARM */ #define EM_ALPHA 41 /* DEC ALPHA */ #define EM_SPARCV9 43 /* SPARC version 9 */ #define EM_ALPHA_EXP 0x9026 /* DEC ALPHA */ #define EM_IA_64 50 /* Intel Merced */ #define EM_X86_64 62 /* AMD x86-64 architecture */ #define EM_VAX 75 /* DEC VAX */ /* Version */ #define EV_NONE 0 /* Invalid */ #define EV_CURRENT 1 /* Current */ #define EV_NUM 2 /* number of versions */ /* Section Header */ typedef struct { Elf32_Word sh_name; /* name - index into section header string table section */ Elf32_Word sh_type; /* type */ Elf32_Word sh_flags; /* flags */ Elf32_Addr sh_addr; /* address */ Elf32_Off sh_offset; /* file offset */ Elf32_Word sh_size; /* section size */ Elf32_Word sh_link; /* section header table index link */ Elf32_Word sh_info; /* extra information */ Elf32_Word sh_addralign; /* address alignment */ Elf32_Word sh_entsize; /* section entry size */ } Elf32_Shdr; typedef struct { Elf64_Half sh_name; /* section name */ Elf64_Half sh_type; /* section type */ Elf64_Xword sh_flags; /* section flags */ Elf64_Addr sh_addr; /* virtual address */ Elf64_Off sh_offset; /* file offset */ Elf64_Xword sh_size; /* section size */ Elf64_Half sh_link; /* link to another */ Elf64_Half sh_info; /* misc info */ Elf64_Xword sh_addralign; /* memory alignment */ Elf64_Xword sh_entsize; /* table entry size */ } Elf64_Shdr; /* Special Section Indexes */ #define SHN_UNDEF 0 /* undefined */ #define SHN_LORESERVE 0xff00 /* lower bounds of reserved indexes */ #define SHN_LOPROC 0xff00 /* reserved range for processor */ #define SHN_HIPROC 0xff1f /* specific section indexes */ #define SHN_ABS 0xfff1 /* absolute value */ #define SHN_COMMON 0xfff2 /* common symbol */ #define SHN_HIRESERVE 0xffff /* upper bounds of reserved indexes */ /* sh_type */ #define SHT_NULL 0 /* inactive */ #define 
SHT_PROGBITS 1 /* program defined information */ #define SHT_SYMTAB 2 /* symbol table section */ #define SHT_STRTAB 3 /* string table section */ #define SHT_RELA 4 /* relocation section with addends*/ #define SHT_HASH 5 /* symbol hash table section */ #define SHT_DYNAMIC 6 /* dynamic section */ #define SHT_NOTE 7 /* note section */ #define SHT_NOBITS 8 /* no space section */ #define SHT_REL 9 /* relation section without addends */ #define SHT_SHLIB 10 /* reserved - purpose unknown */ #define SHT_DYNSYM 11 /* dynamic symbol table section */ #define SHT_NUM 12 /* number of section types */ #define SHT_LOPROC 0x70000000 /* reserved range for processor */ #define SHT_HIPROC 0x7fffffff /* specific section header types */ #define SHT_LOUSER 0x80000000 /* reserved range for application */ #define SHT_HIUSER 0xffffffff /* specific indexes */ /* Section names */ #define ELF_BSS ".bss" /* uninitialized data */ #define ELF_DATA ".data" /* initialized data */ #define ELF_DEBUG ".debug" /* debug */ #define ELF_DYNAMIC ".dynamic" /* dynamic linking information */ #define ELF_DYNSTR ".dynstr" /* dynamic string table */ #define ELF_DYNSYM ".dynsym" /* dynamic symbol table */ #define ELF_FINI ".fini" /* termination code */ #define ELF_GOT ".got" /* global offset table */ #define ELF_HASH ".hash" /* symbol hash table */ #define ELF_INIT ".init" /* initialization code */ #define ELF_REL_DATA ".rel.data" /* relocation data */ #define ELF_REL_FINI ".rel.fini" /* relocation termination code */ #define ELF_REL_INIT ".rel.init" /* relocation initialization code */ #define ELF_REL_DYN ".rel.dyn" /* relocaltion dynamic link info */ #define ELF_REL_RODATA ".rel.rodata" /* relocation read-only data */ #define ELF_REL_TEXT ".rel.text" /* relocation code */ #define ELF_RODATA ".rodata" /* read-only data */ #define ELF_SHSTRTAB ".shstrtab" /* section header string table */ #define ELF_STRTAB ".strtab" /* string table */ #define ELF_SYMTAB ".symtab" /* symbol table */ #define ELF_TEXT ".text" /* code */ /* Section Attribute Flags - sh_flags */ #define SHF_WRITE 0x1 /* Writable */ #define SHF_ALLOC 0x2 /* occupies memory */ #define SHF_EXECINSTR 0x4 /* executable */ #define SHF_MASKPROC 0xf0000000 /* reserved bits for processor */ /* specific section attributes */ /* Symbol Table Entry */ typedef struct elf32_sym { Elf32_Word st_name; /* name - index into string table */ Elf32_Addr st_value; /* symbol value */ Elf32_Word st_size; /* symbol size */ unsigned char st_info; /* type and binding */ unsigned char st_other; /* 0 - no defined meaning */ Elf32_Half st_shndx; /* section header index */ } Elf32_Sym; typedef struct { Elf64_Half st_name; /* Symbol name index in str table */ Elf_Byte st_info; /* type / binding attrs */ Elf_Byte st_other; /* unused */ Elf64_Quarter st_shndx; /* section index of symbol */ Elf64_Xword st_value; /* value of symbol */ Elf64_Xword st_size; /* size of symbol */ } Elf64_Sym; /* Symbol table index */ #define STN_UNDEF 0 /* undefined */ /* Extract symbol info - st_info */ #define ELF32_ST_BIND(x) ((x) >> 4) #define ELF32_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF32_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf)) #define ELF64_ST_BIND(x) ((x) >> 4) #define ELF64_ST_TYPE(x) (((unsigned int) x) & 0xf) #define ELF64_ST_INFO(b,t) (((b) << 4) + ((t) & 0xf)) /* Symbol Binding - ELF32_ST_BIND - st_info */ #define STB_LOCAL 0 /* Local symbol */ #define STB_GLOBAL 1 /* Global symbol */ #define STB_WEAK 2 /* like global - lower precedence */ #define STB_NUM 3 /* number of symbol bindings */ #define 
STB_LOPROC 13 /* reserved range for processor */ #define STB_HIPROC 15 /* specific symbol bindings */ /* Symbol type - ELF32_ST_TYPE - st_info */ #define STT_NOTYPE 0 /* not specified */ #define STT_OBJECT 1 /* data object */ #define STT_FUNC 2 /* function */ #define STT_SECTION 3 /* section */ #define STT_FILE 4 /* file */ #define STT_NUM 5 /* number of symbol types */ #define STT_LOPROC 13 /* reserved range for processor */ #define STT_HIPROC 15 /* specific symbol types */ /* Relocation entry with implicit addend */ typedef struct { Elf32_Addr r_offset; /* offset of relocation */ Elf32_Word r_info; /* symbol table index and type */ } Elf32_Rel; /* Relocation entry with explicit addend */ typedef struct { Elf32_Addr r_offset; /* offset of relocation */ Elf32_Word r_info; /* symbol table index and type */ Elf32_Sword r_addend; } Elf32_Rela; /* Extract relocation info - r_info */ #define ELF32_R_SYM(i) ((i) >> 8) #define ELF32_R_TYPE(i) ((unsigned char) (i)) #define ELF32_R_INFO(s,t) (((s) << 8) + (unsigned char)(t)) typedef struct { Elf64_Xword r_offset; /* where to do it */ Elf64_Xword r_info; /* index & type of relocation */ } Elf64_Rel; typedef struct { Elf64_Xword r_offset; /* where to do it */ Elf64_Xword r_info; /* index & type of relocation */ Elf64_Sxword r_addend; /* adjustment value */ } Elf64_Rela; #define ELF64_R_SYM(info) ((info) >> 32) #define ELF64_R_TYPE(info) ((info) & 0xFFFFFFFF) #define ELF64_R_INFO(s,t) (((s) << 32) + (u_int32_t)(t)) /* Program Header */ typedef struct { Elf32_Word p_type; /* segment type */ Elf32_Off p_offset; /* segment offset */ Elf32_Addr p_vaddr; /* virtual address of segment */ Elf32_Addr p_paddr; /* physical address - ignored? */ Elf32_Word p_filesz; /* number of bytes in file for seg. */ Elf32_Word p_memsz; /* number of bytes in mem. for seg. 
*/ Elf32_Word p_flags; /* flags */ Elf32_Word p_align; /* memory alignment */ } Elf32_Phdr; typedef struct { Elf64_Half p_type; /* entry type */ Elf64_Half p_flags; /* flags */ Elf64_Off p_offset; /* offset */ Elf64_Addr p_vaddr; /* virtual address */ Elf64_Addr p_paddr; /* physical address */ Elf64_Xword p_filesz; /* file size */ Elf64_Xword p_memsz; /* memory size */ Elf64_Xword p_align; /* memory & file alignment */ } Elf64_Phdr; /* Segment types - p_type */ #define PT_NULL 0 /* unused */ #define PT_LOAD 1 /* loadable segment */ #define PT_DYNAMIC 2 /* dynamic linking section */ #define PT_INTERP 3 /* the RTLD */ #define PT_NOTE 4 /* auxiliary information */ #define PT_SHLIB 5 /* reserved - purpose undefined */ #define PT_PHDR 6 /* program header */ #define PT_NUM 7 /* Number of segment types */ #define PT_LOPROC 0x70000000 /* reserved range for processor */ #define PT_HIPROC 0x7fffffff /* specific segment types */ /* Segment flags - p_flags */ #define PF_X 0x1 /* Executable */ #define PF_W 0x2 /* Writable */ #define PF_R 0x4 /* Readable */ #define PF_MASKPROC 0xf0000000 /* reserved bits for processor */ /* specific segment flags */ /* Dynamic structure */ typedef struct { Elf32_Sword d_tag; /* controls meaning of d_val */ union { Elf32_Word d_val; /* Multiple meanings - see d_tag */ Elf32_Addr d_ptr; /* program virtual address */ } d_un; } Elf32_Dyn; typedef struct { Elf64_Xword d_tag; /* controls meaning of d_val */ union { Elf64_Addr d_ptr; Elf64_Xword d_val; } d_un; } Elf64_Dyn; /* Dynamic Array Tags - d_tag */ #define DT_NULL 0 /* marks end of _DYNAMIC array */ #define DT_NEEDED 1 /* string table offset of needed lib */ #define DT_PLTRELSZ 2 /* size of relocation entries in PLT */ #define DT_PLTGOT 3 /* address PLT/GOT */ #define DT_HASH 4 /* address of symbol hash table */ #define DT_STRTAB 5 /* address of string table */ #define DT_SYMTAB 6 /* address of symbol table */ #define DT_RELA 7 /* address of relocation table */ #define DT_RELASZ 8 /* size of relocation table */ #define DT_RELAENT 9 /* size of relocation entry */ #define DT_STRSZ 10 /* size of string table */ #define DT_SYMENT 11 /* size of symbol table entry */ #define DT_INIT 12 /* address of initialization func. */ #define DT_FINI 13 /* address of termination function */ #define DT_SONAME 14 /* string table offset of shared obj */ #define DT_RPATH 15 /* string table offset of library search path */ #define DT_SYMBOLIC 16 /* start sym search in shared obj. */ #define DT_REL 17 /* address of rel. tbl. w addends */ #define DT_RELSZ 18 /* size of DT_REL relocation table */ #define DT_RELENT 19 /* size of DT_REL relocation entry */ #define DT_PLTREL 20 /* PLT referenced relocation entry */ #define DT_DEBUG 21 /* bugger */ #define DT_TEXTREL 22 /* Allow rel. mod. to unwritable seg */ #define DT_JMPREL 23 /* add. of PLT's relocation entries */ #define DT_BIND_NOW 24 /* Bind now regardless of env setting */ #define DT_NUM 25 /* Number used. 
*/ #define DT_LOPROC 0x70000000 /* reserved range for processor */ #define DT_HIPROC 0x7fffffff /* specific dynamic array tags */ /* Standard ELF hashing function */ unsigned int elf_hash(const unsigned char *name); /* * Note Definitions */ typedef struct { Elf32_Word namesz; Elf32_Word descsz; Elf32_Word type; } Elf32_Note; typedef struct { Elf64_Half namesz; Elf64_Half descsz; Elf64_Half type; } Elf64_Note; #if defined(ELFSIZE) #define CONCAT(x,y) __CONCAT(x,y) #define ELFNAME(x) CONCAT(elf,CONCAT(ELFSIZE,CONCAT(_,x))) #define ELFNAME2(x,y) CONCAT(x,CONCAT(_elf,CONCAT(ELFSIZE,CONCAT(_,y)))) #define ELFNAMEEND(x) CONCAT(x,CONCAT(_elf,ELFSIZE)) #define ELFDEFNNAME(x) CONCAT(ELF,CONCAT(ELFSIZE,CONCAT(_,x))) #endif #if defined(ELFSIZE) && (ELFSIZE == 32) #define Elf_Ehdr Elf32_Ehdr #define Elf_Phdr Elf32_Phdr #define Elf_Shdr Elf32_Shdr #define Elf_Sym Elf32_Sym #define Elf_Rel Elf32_Rel #define Elf_RelA Elf32_Rela #define Elf_Dyn Elf32_Dyn #define Elf_Word Elf32_Word #define Elf_Sword Elf32_Sword #define Elf_Addr Elf32_Addr #define Elf_Off Elf32_Off #define Elf_Nhdr Elf32_Nhdr #define Elf_Note Elf32_Note #define ELF_R_SYM ELF32_R_SYM #define ELF_R_TYPE ELF32_R_TYPE #define ELF_R_INFO ELF32_R_INFO #define ELFCLASS ELFCLASS32 #define ELF_ST_BIND ELF32_ST_BIND #define ELF_ST_TYPE ELF32_ST_TYPE #define ELF_ST_INFO ELF32_ST_INFO #define AuxInfo Aux32Info #elif defined(ELFSIZE) && (ELFSIZE == 64) #define Elf_Ehdr Elf64_Ehdr #define Elf_Phdr Elf64_Phdr #define Elf_Shdr Elf64_Shdr #define Elf_Sym Elf64_Sym #define Elf_Rel Elf64_Rel #define Elf_RelA Elf64_Rela #define Elf_Dyn Elf64_Dyn #define Elf_Word Elf64_Word #define Elf_Sword Elf64_Sword #define Elf_Addr Elf64_Addr #define Elf_Off Elf64_Off #define Elf_Nhdr Elf64_Nhdr #define Elf_Note Elf64_Note #define ELF_R_SYM ELF64_R_SYM #define ELF_R_TYPE ELF64_R_TYPE #define ELF_R_INFO ELF64_R_INFO #define ELFCLASS ELFCLASS64 #define ELF_ST_BIND ELF64_ST_BIND #define ELF_ST_TYPE ELF64_ST_TYPE #define ELF_ST_INFO ELF64_ST_INFO #define AuxInfo Aux64Info #endif #endif /* __XEN_PUBLIC_ELFSTRUCTS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/event_channel.h000066400000000000000000000213251314037446600271300ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. 
*/ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. 
*/ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. 
*/ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/features.h000066400000000000000000000051521314037446600261350ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? 
*/ #define XENFEAT_mmu_pt_update_preserve_ad 5 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/grant_table.h000066400000000000000000000402631314037446600266030ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. 
 * The guest must /not/ modify the grant entry until the address of the
 * transferred frame is written. It is safe for the guest to spin waiting
 * for this to occur (detect by observing GTF_transfer_completed in
 * ent->flags).
 *
 * Invalidating a committed GTF_accept_transfer entry:
 *  1. Wait for (ent->flags & GTF_transfer_completed).
 *
 * Changing a GTF_permit_access from writable to read-only:
 *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
 *
 * Changing a GTF_permit_access from read-only to writable:
 *  Use SMP-safe bit-setting instruction.
 */

/*
 * A grant table comprises a packed array of grant entries in one or more
 * page frames shared between Xen and a guest.
 * [XEN]: This field is written by Xen and read by the sharing guest.
 * [GST]: This field is written by the guest and read by Xen.
 */
struct grant_entry {
    /* GTF_xxx: various type and flag information. [XEN,GST] */
    uint16_t flags;
    /* The domain being granted foreign privileges. [GST] */
    domid_t  domid;
    /*
     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
     * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
     */
    uint32_t frame;
};
typedef struct grant_entry grant_entry_t;

/*
 * Type of grant entry.
 * GTF_invalid: This grant entry grants no privileges.
 * GTF_permit_access: Allow @domid to map/access @frame.
 * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
 *                      to this guest. Xen writes the page number to @frame.
 */
#define GTF_invalid         (0U<<0)
#define GTF_permit_access   (1U<<0)
#define GTF_accept_transfer (2U<<0)
#define GTF_type_mask       (3U<<0)

/*
 * Subflags for GTF_permit_access.
 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
 * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
 */
#define _GTF_readonly (2)
#define GTF_readonly  (1U<<_GTF_readonly)
#define _GTF_reading  (3)
#define GTF_reading   (1U<<_GTF_reading)
#define _GTF_writing  (4)
#define GTF_writing   (1U<<_GTF_writing)
#define _GTF_PWT      (5)
#define GTF_PWT       (1U<<_GTF_PWT)
#define _GTF_PCD      (6)
#define GTF_PCD       (1U<<_GTF_PCD)
#define _GTF_PAT      (7)
#define GTF_PAT       (1U<<_GTF_PAT)

/*
 * Subflags for GTF_accept_transfer:
 * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
 *  to transferring ownership of a page frame. When a guest sees this flag
 *  it must /not/ modify the grant entry until GTF_transfer_completed is
 *  set by Xen.
 * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
 *  after reading GTF_transfer_committed. Xen will always write the frame
 *  address, followed by ORing this flag, in a timely manner.
 */
#define _GTF_transfer_committed (2)
#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
#define _GTF_transfer_completed (3)
#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)

/***********************************
 * GRANT TABLE QUERIES AND USES
 */

/*
 * Reference to a grant entry in a specified domain's grant table.
 */
typedef uint32_t grant_ref_t;

/*
 * Handle to track a mapping created via a grant reference.
 */
typedef uint32_t grant_handle_t;

/*
 * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
 * by devices and/or host CPUs. If successful, <handle> is a tracking number
 * that must be presented later to destroy the mapping(s). On error, <handle>
 * is a negative status code.
 * NOTES:
 *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
 *     via which I/O devices may access the granted frame.
 *  2. If GNTMAP_host_map is specified then a mapping will be added at
 *     either a host virtual address in the current address space, or at
 *     a PTE at the specified machine address. The type of mapping to
 *     perform is selected through the GNTMAP_contains_pte flag, and the
 *     address is specified in <host_addr>.
 *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
 *     host mapping is destroyed by other means then it is *NOT* guaranteed
 *     to be accounted to the correct grant reference!
 */
#define GNTTABOP_map_grant_ref        0
struct gnttab_map_grant_ref {
    /* IN parameters. */
    uint64_t host_addr;
    uint32_t flags;               /* GNTMAP_* */
    grant_ref_t ref;
    domid_t  dom;
    /* OUT parameters. */
    int16_t  status;              /* GNTST_* */
    grant_handle_t handle;
    uint64_t dev_bus_addr;
};
typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);

/*
 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
 * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
 * field is ignored. If non-zero, they must refer to a device/host mapping
 * that is tracked by <handle>
 * NOTES:
 *  1. The call may fail in an undefined manner if either mapping is not
 *     tracked by <handle>.
 *  3. After executing a batch of unmaps, it is guaranteed that no stale
 *     mappings will remain in the device or host TLBs.
 */
#define GNTTABOP_unmap_grant_ref      1
struct gnttab_unmap_grant_ref {
    /* IN parameters. */
    uint64_t host_addr;
    uint64_t dev_bus_addr;
    grant_handle_t handle;
    /* OUT parameters. */
    int16_t  status;              /* GNTST_* */
};
typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);

/*
 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
 * <nr_frames> pages. The frame addresses are written to the <frame_list>.
 * Only <nr_frames> addresses are written, even if the table is larger.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 *  3. Xen may not support more than a single grant-table page per domain.
 */
#define GNTTABOP_setup_table          2
struct gnttab_setup_table {
    /* IN parameters. */
    domid_t  dom;
    uint32_t nr_frames;
    /* OUT parameters. */
    int16_t  status;              /* GNTST_* */
    XEN_GUEST_HANDLE(ulong) frame_list;
};
typedef struct gnttab_setup_table gnttab_setup_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);

/*
 * GNTTABOP_dump_table: Dump the contents of the grant table to the
 * xen console. Debugging use only.
 */
#define GNTTABOP_dump_table           3
struct gnttab_dump_table {
    /* IN parameters. */
    domid_t dom;
    /* OUT parameters. */
    int16_t status;               /* GNTST_* */
};
typedef struct gnttab_dump_table gnttab_dump_table_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);

/*
 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
 * foreign domain has previously registered its interest in the transfer via
 * <domid, ref>.
 *
 * Note that, even if the transfer fails, the specified page no longer belongs
 * to the calling domain *unless* the error is GNTST_bad_page.
 */
#define GNTTABOP_transfer             4
struct gnttab_transfer {
    /* IN parameters. */
    xen_pfn_t mfn;
    domid_t   domid;
    grant_ref_t ref;
    /* OUT parameters. */
    int16_t   status;
};
typedef struct gnttab_transfer gnttab_transfer_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);

/*
 * GNTTABOP_copy: Hypervisor based copy
 * source and destinations can be either MFNs or, for foreign domains,
 * grant references. The foreign domain has to grant read/write access
 * in its grant table.
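 *
 * For illustration only, a guest could perform a single-page copy out of a
 * page granted to it roughly as follows (src_gref, backend_domid and
 * local_mfn are placeholder values, and HYPERVISOR_grant_table_op() is the
 * guest's usual grant-table hypercall wrapper):
 *
 *     gnttab_copy_t op;
 *     memset(&op, 0, sizeof(op));
 *     op.source.u.ref = src_gref;        // source is a grant reference...
 *     op.source.domid = backend_domid;   // ...granted by this domain
 *     op.dest.u.gmfn  = local_mfn;       // destination is a local frame
 *     op.dest.domid   = DOMID_SELF;
 *     op.len          = 4096;            // offsets left at zero: whole page
 *     op.flags        = GNTCOPY_source_gref;
 *     HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1);
 *     // the copy succeeded iff op.status == GNTST_okay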
 *
 * The flags specify what type the source and destinations are (either MFN
 * or grant reference).
 *
 * Note that this can also be used to copy data between two domains
 * via a third party if the source and destination domains had previously
 * granted appropriate access to their pages to the third party.
 *
 * source_offset specifies an offset in the source frame, dest_offset
 * the offset in the target frame and len specifies the number of
 * bytes to be copied.
 */
#define _GNTCOPY_source_gref      (0)
#define GNTCOPY_source_gref       (1<<_GNTCOPY_source_gref)
#define _GNTCOPY_dest_gref        (1)
#define GNTCOPY_dest_gref         (1<<_GNTCOPY_dest_gref)

#define GNTTABOP_copy                 5
typedef struct gnttab_copy {
    /* IN parameters. */
    struct {
        union {
            grant_ref_t ref;
            xen_pfn_t   gmfn;
        } u;
        domid_t  domid;
        uint16_t offset;
    } source, dest;
    uint16_t      len;
    uint16_t      flags;          /* GNTCOPY_* */
    /* OUT parameters. */
    int16_t       status;
} gnttab_copy_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);

/*
 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
 * grant table.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 */
#define GNTTABOP_query_size           6
struct gnttab_query_size {
    /* IN parameters. */
    domid_t  dom;
    /* OUT parameters. */
    uint32_t nr_frames;
    uint32_t max_nr_frames;
    int16_t  status;              /* GNTST_* */
};
typedef struct gnttab_query_size gnttab_query_size_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);

/*
 * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
 * tracked by <handle> but atomically replace the page table entry with one
 * pointing to the machine address under <new_addr>. <new_addr> will be
 * redirected to the null entry.
 * NOTES:
 *  1. The call may fail in an undefined manner if either mapping is not
 *     tracked by <handle>.
 *  2. After executing a batch of unmaps, it is guaranteed that no stale
 *     mappings will remain in the device or host TLBs.
 */
#define GNTTABOP_unmap_and_replace    7
struct gnttab_unmap_and_replace {
    /* IN parameters. */
    uint64_t host_addr;
    uint64_t new_addr;
    grant_handle_t handle;
    /* OUT parameters. */
    int16_t  status;              /* GNTST_* */
};
typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);

/*
 * Bitfield values for update_pin_status.flags.
 */
 /* Map the grant entry for access by I/O devices. */
#define _GNTMAP_device_map      (0)
#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
 /* Map the grant entry for access by host CPUs. */
#define _GNTMAP_host_map        (1)
#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
 /* Accesses to the granted frame will be restricted to read-only access. */
#define _GNTMAP_readonly        (2)
#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
 /*
  * GNTMAP_host_map subflag:
  *  0 => The host mapping is usable only by the guest OS.
  *  1 => The host mapping is usable by guest OS + current application.
  */
#define _GNTMAP_application_map (3)
#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
 /*
  * GNTMAP_contains_pte subflag:
  *  0 => This map request contains a host virtual address.
  *  1 => This map request contains the machine address of the PTE to update.
  */
#define _GNTMAP_contains_pte    (4)
#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)

/*
 * Values for error status returns. All errors are -ve.
 */
#define GNTST_okay             (0)  /* Normal return.                        */
#define GNTST_general_error    (-1) /* General undefined error.              */
#define GNTST_bad_domain       (-2) /* Unrecognised domain id.               */
#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref.
*/ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/000077500000000000000000000000001314037446600247355ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/e820.h000066400000000000000000000030061314037446600255630ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/hvm_info_table.h000066400000000000000000000033221314037446600300620ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; uint8_t acpi_enabled; uint8_t apic_mode; uint32_t nr_vcpus; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/hvm_op.h000066400000000000000000000057011314037446600264010ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). 
*/ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/ioreq.h000066400000000000000000000107701314037446600262320ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_AND 2 #define IOREQ_TYPE_OR 3 #define IOREQ_TYPE_XOR 4 #define IOREQ_TYPE_XCHG 5 #define IOREQ_TYPE_ADD 6 #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ #define IOREQ_TYPE_SUB 9 /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. 
*/ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t pad:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. */ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #if defined(__i386__) || defined(__x86_64__) #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #endif /* defined(__i386__) || defined(__x86_64__) */ #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/params.h000066400000000000000000000063441314037446600264000ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). 
* val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 #define HVM_NR_PARAMS 11 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/save.h000066400000000000000000000061631314037446600260520ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. 
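 *
 * For illustration, a record that satisfies these layout rules (a made-up
 * example, not one of the real save records) could look like:
 *
 *     struct hvm_hw_example {
 *         uint32_t flags;
 *         uint32_t _pad;    // explicit padding keeps the 64-bit field aligned
 *         uint64_t base;
 *     };                    // sizeof == 16 in both 32-bit and 64-bit builds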
* * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/hvm/vmx_assist.h000066400000000000000000000073731314037446600273200ustar00rootroot00000000000000/* * vmx_assist.h: Context definitions for the VMXASSIST world switch. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Leendert van Doorn, leendert@watson.ibm.com * Copyright (c) 2005, International Business Machines Corporation. 
*/ #ifndef _VMX_ASSIST_H_ #define _VMX_ASSIST_H_ #define VMXASSIST_BASE 0xD0000 #define VMXASSIST_MAGIC 0x17101966 #define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8) #define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12) #define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4) #ifndef __ASSEMBLY__ #define NR_EXCEPTION_HANDLER 32 #define NR_INTERRUPT_HANDLERS 16 #define NR_TRAPS (NR_EXCEPTION_HANDLER+NR_INTERRUPT_HANDLERS) union vmcs_arbytes { struct arbyte_fields { unsigned int seg_type : 4, s : 1, dpl : 2, p : 1, reserved0 : 4, avl : 1, reserved1 : 1, default_ops_size: 1, g : 1, null_bit : 1, reserved2 : 15; } fields; unsigned int bytes; }; /* * World switch state */ struct vmx_assist_context { uint32_t eip; /* execution pointer */ uint32_t esp; /* stack pointer */ uint32_t eflags; /* flags register */ uint32_t cr0; uint32_t cr3; /* page table directory */ uint32_t cr4; uint32_t idtr_limit; /* idt */ uint32_t idtr_base; uint32_t gdtr_limit; /* gdt */ uint32_t gdtr_base; uint32_t cs_sel; /* cs selector */ uint32_t cs_limit; uint32_t cs_base; union vmcs_arbytes cs_arbytes; uint32_t ds_sel; /* ds selector */ uint32_t ds_limit; uint32_t ds_base; union vmcs_arbytes ds_arbytes; uint32_t es_sel; /* es selector */ uint32_t es_limit; uint32_t es_base; union vmcs_arbytes es_arbytes; uint32_t ss_sel; /* ss selector */ uint32_t ss_limit; uint32_t ss_base; union vmcs_arbytes ss_arbytes; uint32_t fs_sel; /* fs selector */ uint32_t fs_limit; uint32_t fs_base; union vmcs_arbytes fs_arbytes; uint32_t gs_sel; /* gs selector */ uint32_t gs_limit; uint32_t gs_base; union vmcs_arbytes gs_arbytes; uint32_t tr_sel; /* task selector */ uint32_t tr_limit; uint32_t tr_base; union vmcs_arbytes tr_arbytes; uint32_t ldtr_sel; /* ldtr selector */ uint32_t ldtr_limit; uint32_t ldtr_base; union vmcs_arbytes ldtr_arbytes; unsigned char rm_irqbase[2]; }; typedef struct vmx_assist_context vmx_assist_context_t; #endif /* __ASSEMBLY__ */ #endif /* _VMX_ASSIST_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/000077500000000000000000000000001314037446600245525ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/blkif.h000066400000000000000000000274231314037446600260220ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. 
* Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). 
*/ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/console.h000066400000000000000000000033341314037446600263700ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/fbif.h000066400000000000000000000113101314037446600256250ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. 
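 *
 * For illustration only, a frontend could queue such an update roughly as
 * follows (page points at the shared struct xenfb_page defined below, and
 * xen_wmb() stands in for whatever write barrier the guest kernel provides;
 * a real driver would also check for ring overflow and then notify the
 * backend through its event channel):
 *
 *     union xenfb_out_event ev = { .update = {
 *         .type = XENFB_TYPE_UPDATE,
 *         .x = 0, .y = 0, .width = 800, .height = 600 } };
 *     XENFB_OUT_RING_REF(page, page->out_prod) = ev;
 *     xen_wmb();              // publish the event body before the index
 *     page->out_prod++;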
*/ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. * No in events currently defined. */ #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs * 64 bit. 256 directories give enough room for a 512 Meg * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. */ unsigned long pd[256]; }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/fsif.h000066400000000000000000000113471314037446600256600ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc. */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; grant_ref_t gref; uint64_t len; uint64_t offset; }; struct fsif_write_request { uint32_t fd; grant_ref_t gref; uint64_t len; uint64_t offset; }; struct fsif_stat_request { uint32_t fd; grant_ref_t gref; }; /* This structure is a copy of some fields from stat structure, writen to the * granted page. 
*/ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t pad; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; uint64_t ret_val; }; typedef struct fsif_response fsif_response_t; DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/kbdif.h000066400000000000000000000076241314037446600260130ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/netif.h000066400000000000000000000163401314037446600260340ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. 
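 *
 * (Illustration only, not part of the wire format: a front end sending a
 * TCPv4 TSO packet would typically set NETTXF_extra_info on the first
 * netif_tx_request and fill the next ring slot with a GSO descriptor,
 * roughly as sketched below. "tx", "prod" and "mss" are hypothetical --
 * a front-end ring, its private producer index, and the path MSS.
 *
 *     struct netif_extra_info *gso =
 *         (struct netif_extra_info *)RING_GET_REQUEST(&tx, prod++);
 *     gso->type           = XEN_NETIF_EXTRA_TYPE_GSO;
 *     gso->flags          = 0;
 *     gso->u.gso.size     = mss;
 *     gso->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
 *     gso->u.gso.pad      = 0;
 *     gso->u.gso.features = 0;   -- no extra GSO features requested
 * )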
*/ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). */ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/pciif.h000066400000000000000000000051331314037446600260170ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; }; struct xen_pci_sharedinfo { /* flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/protocols.h000066400000000000000000000011751314037446600267530ustar00rootroot00000000000000#ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #elif defined(__powerpc64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/ring.h000066400000000000000000000350141314037446600256650ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t pad[48]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. 
* * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). 
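 *
 * (Illustration only, using the hypothetical "mytag" ring from the
 * DEFINE_RING_TYPES() example above: a front end that batches requests
 * and honours the hold-off scheme might, very roughly, do the following.
 * "evtchn_notify()" stands in for whatever notification primitive the
 * OS provides; it is not defined by this header.
 *
 *     mytag_front_ring_t front;   -- set up earlier with FRONT_RING_INIT()
 *     request_t *req;
 *     int notify;
 *
 *     req = RING_GET_REQUEST(&front, front.req_prod_pvt);
 *     -- ... fill in *req ...
 *     front.req_prod_pvt++;
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
 *     if (notify)
 *         evtchn_notify();
 * )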
* * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. */ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ mb(); /* front sees new responses /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/tpmif.h000066400000000000000000000046251314037446600260510ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/xenbus.h000066400000000000000000000044231314037446600262320ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. 
*/ XenbusStateClosing = 5, XenbusStateClosed = 6 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/io/xs_wire.h000066400000000000000000000067101314037446600264070ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. 
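 * (The limit in question is XENSTORE_PAYLOAD_MAX below: the payload of a
 * single xsd_sockmsg must not exceed it. Illustration only -- a guest-side
 * writer pushing bytes into req[] above might look roughly like this,
 * with "intf" a mapped xenstore_domain_interface, "data"/"len" the
 * header-plus-payload bytes to send, and wmb() whatever write barrier
 * the OS provides; none of these names are defined by this header.
 *
 *     while (len) {
 *         XENSTORE_RING_IDX prod = intf->req_prod;
 *         if (prod - intf->req_cons < XENSTORE_RING_SIZE) {
 *             intf->req[MASK_XENSTORE_IDX(prod)] = *data++;
 *             wmb();              -- byte visible before index update
 *             intf->req_prod = prod + 1;
 *             len--;
 *         }
 *         -- else: ring full, wait for the daemon to make progress
 *     }
 * )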
*/ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/kexec.h000066400000000000000000000110311314037446600254070ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. * * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. */ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. 
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... [in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/libelf.h000066400000000000000000000167501314037446600255620ustar00rootroot00000000000000#ifndef __XC_LIBELF__ #define __XC_LIBELF__ 1 #if defined(__i386__) || defined(__x86_64) || defined(__ia64__) #define XEN_ELF_LITTLE_ENDIAN #elif defined(__powerpc__) #define XEN_ELF_BIG_ENDIAN #else #error define architectural endianness #endif #undef ELFSIZE #include "elfnote.h" #include "elfstructs.h" #include "features.h" /* ------------------------------------------------------------------------ */ typedef union { Elf32_Ehdr e32; Elf64_Ehdr e64; } elf_ehdr; typedef union { Elf32_Phdr e32; Elf64_Phdr e64; } elf_phdr; typedef union { Elf32_Shdr e32; Elf64_Shdr e64; } elf_shdr; typedef union { Elf32_Sym e32; Elf64_Sym e64; } elf_sym; typedef union { Elf32_Rel e32; Elf64_Rel e64; } elf_rel; typedef union { Elf32_Rela e32; Elf64_Rela e64; } elf_rela; typedef union { Elf32_Note e32; Elf64_Note e64; } elf_note; struct elf_binary { /* elf binary */ const char *image; size_t size; char class; char data; const elf_ehdr *ehdr; const char *sec_strtab; const elf_shdr *sym_tab; const char *sym_strtab; /* loaded to */ char *dest; uint64_t pstart; uint64_t pend; uint64_t reloc_offset; uint64_t bsd_symtab_pstart; uint64_t bsd_symtab_pend; #ifndef __XEN__ /* misc */ FILE *log; #endif int verbose; }; /* ------------------------------------------------------------------------ */ /* accessing elf header fields */ #ifdef XEN_ELF_BIG_ENDIAN # define NATIVE_ELFDATA ELFDATA2MSB #else # define NATIVE_ELFDATA ELFDATA2LSB #endif #define elf_32bit(elf) (ELFCLASS32 == (elf)->class) #define elf_64bit(elf) (ELFCLASS64 == (elf)->class) #define elf_msb(elf) (ELFDATA2MSB == (elf)->data) #define elf_lsb(elf) (ELFDATA2LSB == (elf)->data) #define elf_swap(elf) (NATIVE_ELFDATA != (elf)->data) #define elf_uval(elf, str, elem) \ ((ELFCLASS64 == (elf)->class) \ ? elf_access_unsigned((elf), (str), \ offsetof(typeof(*(str)),e64.elem), \ sizeof((str)->e64.elem)) \ : elf_access_unsigned((elf), (str), \ offsetof(typeof(*(str)),e32.elem), \ sizeof((str)->e32.elem))) #define elf_sval(elf, str, elem) \ ((ELFCLASS64 == (elf)->class) \ ? 
elf_access_signed((elf), (str), \ offsetof(typeof(*(str)),e64.elem), \ sizeof((str)->e64.elem)) \ : elf_access_signed((elf), (str), \ offsetof(typeof(*(str)),e32.elem), \ sizeof((str)->e32.elem))) #define elf_size(elf, str) \ ((ELFCLASS64 == (elf)->class) \ ? sizeof((str)->e64) : sizeof((str)->e32)) uint64_t elf_access_unsigned(struct elf_binary *elf, const void *ptr, uint64_t offset, size_t size); int64_t elf_access_signed(struct elf_binary *elf, const void *ptr, uint64_t offset, size_t size); uint64_t elf_round_up(struct elf_binary *elf, uint64_t addr); /* ------------------------------------------------------------------------ */ /* xc_libelf_tools.c */ int elf_shdr_count(struct elf_binary *elf); int elf_phdr_count(struct elf_binary *elf); const elf_shdr *elf_shdr_by_name(struct elf_binary *elf, const char *name); const elf_shdr *elf_shdr_by_index(struct elf_binary *elf, int index); const elf_phdr *elf_phdr_by_index(struct elf_binary *elf, int index); const char *elf_section_name(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_section_start(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_section_end(struct elf_binary *elf, const elf_shdr * shdr); const void *elf_segment_start(struct elf_binary *elf, const elf_phdr * phdr); const void *elf_segment_end(struct elf_binary *elf, const elf_phdr * phdr); const elf_sym *elf_sym_by_name(struct elf_binary *elf, const char *symbol); const elf_sym *elf_sym_by_index(struct elf_binary *elf, int index); const char *elf_note_name(struct elf_binary *elf, const elf_note * note); const void *elf_note_desc(struct elf_binary *elf, const elf_note * note); uint64_t elf_note_numeric(struct elf_binary *elf, const elf_note * note); const elf_note *elf_note_next(struct elf_binary *elf, const elf_note * note); int elf_is_elfbinary(const void *image); int elf_phdr_is_loadable(struct elf_binary *elf, const elf_phdr * phdr); /* ------------------------------------------------------------------------ */ /* xc_libelf_loader.c */ int elf_init(struct elf_binary *elf, const char *image, size_t size); #ifdef __XEN__ void elf_set_verbose(struct elf_binary *elf); #else void elf_set_logfile(struct elf_binary *elf, FILE * log, int verbose); #endif void elf_parse_binary(struct elf_binary *elf); void elf_load_binary(struct elf_binary *elf); void *elf_get_ptr(struct elf_binary *elf, unsigned long addr); uint64_t elf_lookup_addr(struct elf_binary *elf, const char *symbol); void elf_parse_bsdsyms(struct elf_binary *elf, uint64_t pstart); /* private */ /* ------------------------------------------------------------------------ */ /* xc_libelf_relocate.c */ int elf_reloc(struct elf_binary *elf); /* ------------------------------------------------------------------------ */ /* xc_libelf_dominfo.c */ #define UNSET_ADDR ((uint64_t)-1) enum xen_elfnote_type { XEN_ENT_NONE = 0, XEN_ENT_LONG = 1, XEN_ENT_STR = 2 }; struct xen_elfnote { enum xen_elfnote_type type; const char *name; union { const char *str; uint64_t num; } data; }; struct elf_dom_parms { /* raw */ const char *guest_info; const void *elf_note_start; const void *elf_note_end; struct xen_elfnote elf_notes[XEN_ELFNOTE_MAX + 1]; /* parsed */ char guest_os[16]; char guest_ver[16]; char xen_ver[16]; char loader[16]; int pae; int bsd_symtab; uint64_t virt_base; uint64_t virt_entry; uint64_t virt_hypercall; uint64_t virt_hv_start_low; uint64_t elf_paddr_offset; uint32_t f_supported[XENFEAT_NR_SUBMAPS]; uint32_t f_required[XENFEAT_NR_SUBMAPS]; /* calculated */ uint64_t virt_offset; uint64_t virt_kstart; 
uint64_t virt_kend; }; static inline void elf_xen_feature_set(int nr, uint32_t * addr) { addr[nr >> 5] |= 1 << (nr & 31); } static inline int elf_xen_feature_get(int nr, uint32_t * addr) { return !!(addr[nr >> 5] & (1 << (nr & 31))); } int elf_xen_parse_features(const char *features, uint32_t *supported, uint32_t *required); int elf_xen_parse_note(struct elf_binary *elf, struct elf_dom_parms *parms, const elf_note *note); int elf_xen_parse_guest_info(struct elf_binary *elf, struct elf_dom_parms *parms); int elf_xen_parse(struct elf_binary *elf, struct elf_dom_parms *parms); #endif /* __XC_LIBELF__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/memory.h000066400000000000000000000230441314037446600256270ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; /* * Maximum # bits addressable by the user of the allocated region (e.g., * I/O devices often have a 32-bit limitation even in 64-bit systems). If * zero then the user has no addressing restriction. * This field is not used by XENMEM_decrease_reservation. */ unsigned int address_bits; /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. 
If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. 
*/ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /* * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error * code on failure. This call only works for auto-translated guests. */ #define XENMEM_translate_gpfn_list 8 struct xen_translate_gpfn_list { /* Which domain to translate for? */ domid_t domid; /* Length of list. */ xen_ulong_t nr_gpfns; /* List of GPFNs to translate. */ XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list; /* * Output list to contain MFN translations. May be the same as the input * list (in which case each input GPFN is overwritten with the output MFN). */ XEN_GUEST_HANDLE(xen_pfn_t) mfn_list; }; typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t); /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/nmi.h000066400000000000000000000052641314037446600251060ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/physdev.h000066400000000000000000000125411314037446600260010ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ XEN_GUEST_HANDLE_00030205(uint8) bitmap; uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/platform.h000066400000000000000000000207651314037446600261520ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. 
*/ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(void) data; /* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. */ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. 
On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. */ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/sched.h000066400000000000000000000104321314037446600254020ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. 
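/*
 * Illustrative sketch (not part of the Xen interface itself), referring back
 * to platform.h above: issuing XENPF_settime through the xen_platform_op
 * wrapper structure. HYPERVISOR_platform_op() is assumed to be the dom0
 * kernel's wrapper around the __HYPERVISOR_platform_op hypercall; the name
 * and return convention are not defined by these headers.
 */
extern int HYPERVISOR_platform_op(struct xen_platform_op *op);

static int set_xen_wallclock(uint32_t secs, uint32_t nsecs, uint64_t system_time_ns)
{
    struct xen_platform_op op = {
        .cmd               = XENPF_settime,
        .interface_version = XENPF_INTERFACE_VERSION,
        .u.settime = {
            .secs        = secs,            /* wallclock seconds since epoch */
            .nsecs       = nsecs,           /* wallclock nanoseconds         */
            .system_time = system_time_ns,  /* Xen system time at this instant */
        },
    };
    return HYPERVISOR_platform_op(&op);
}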
* @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/sysctl.h000066400000000000000000000204741314037446600256440ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
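/*
 * Illustrative sketch (not part of the Xen interface itself): requesting a
 * clean reboot of the calling domain with SCHEDOP_shutdown from sched.h
 * above. HYPERVISOR_sched_op() is assumed to follow the documented prototype
 * "long sched_op(int cmd, void *arg)"; the wrapper name is kernel-specific.
 */
extern long HYPERVISOR_sched_op(int cmd, void *arg);

static void guest_reboot(void)
{
    struct sched_shutdown sd = { .reason = SHUTDOWN_reboot };

    /* Ask the system controller to clean up, kill and restart this domain. */
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sd);

    /* Should not return; yield the CPU if the request is still in flight. */
    for (;;)
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}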
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000006 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. */ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. */ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 struct xen_sysctl_physinfo { /* IN variables. */ uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; uint32_t nr_nodes; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; /* IN/OUT variables. */ /* * IN: maximum addressable entry in the caller-provided cpu_to_node array. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the cpu_to_node array is truncated! */ uint32_t max_cpu_id; /* * If not NULL, this array is filled with node identifier for each cpu. * If a cpu has no node information (e.g., cpu not present) then the * sentinel value ~0u is written. * The size of this array is specified by the caller in @max_cpu_id. * If the actual @max_cpu_id is smaller than the array then the trailing * elements of the array will not be written by the sysctl. */ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. 
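/*
 * Illustrative sketch (not part of the Xen interface itself): querying host
 * topology with XEN_SYSCTL_physinfo. Sysctls are restricted to the control
 * domain's toolstack; do_sysctl() stands in for whatever wrapper that
 * toolstack uses to issue __HYPERVISOR_sysctl and is purely an assumption.
 * The cpu_to_node handle is left NULL, which the interface above permits.
 */
#include <stdio.h>
#include <string.h>

extern int do_sysctl(struct xen_sysctl *sysctl);

static int print_host_info(void)
{
    struct xen_sysctl op;
    int rc;

    memset(&op, 0, sizeof(op));             /* leaves cpu_to_node handle NULL */
    op.cmd = XEN_SYSCTL_physinfo;
    op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;

    rc = do_sysctl(&op);
    if (rc)
        return rc;

    printf("%u CPUs (%u threads/core, %u cores/socket), %u kHz, %llu free pages\n",
           op.u.physinfo.nr_cpus,
           op.u.physinfo.threads_per_core,
           op.u.physinfo.cores_per_socket,
           op.u.physinfo.cpu_khz,
           (unsigned long long)op.u.physinfo.free_pages);
    return 0;
}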
*/ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. */ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ #define XEN_SYSCTL_getcpuinfo 8 struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); #define XEN_SYSCTL_availheap 9 struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
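/*
 * Illustrative sketch (not part of the Xen interface itself): injecting
 * debug keys into Xen with XEN_SYSCTL_debug_keys, e.g. 'q' to dump domain
 * state. set_xen_guest_handle() is the helper provided by the architecture
 * headers pulled in via xen.h; do_sysctl() is again an assumed toolstack
 * wrapper, and real callers must also keep the key buffer accessible to the
 * hypercall (locking/bouncing is omitted here).
 */
#include <string.h>

extern int do_sysctl(struct xen_sysctl *sysctl);

static int send_debug_keys(char *keys)
{
    struct xen_sysctl op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_SYSCTL_debug_keys;
    op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
    set_xen_guest_handle(op.u.debug_keys.keys, keys);
    op.u.debug_keys.nr_keys = strlen(keys);

    return do_sysctl(&op);
}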
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/trace.h000066400000000000000000000152611314037446600254170ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
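/*
 * Illustrative sketch (not part of the Xen interface itself): asking Xen how
 * much heap memory is still available below 4GB with XEN_SYSCTL_availheap,
 * using the xen_sysctl wrapper structure defined just above. do_sysctl()
 * remains an assumed toolstack wrapper for the __HYPERVISOR_sysctl hypercall.
 */
#include <string.h>

extern int do_sysctl(struct xen_sysctl *sysctl);

static int query_low_heap(uint64_t *bytes)
{
    struct xen_sysctl op;
    int rc;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_SYSCTL_availheap;
    op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
    op.u.availheap.min_bitwidth = 0;    /* no lower bound on address width */
    op.u.availheap.max_bitwidth = 32;   /* only memory addressable in 32 bits */
    op.u.availheap.node         = -1;   /* all NUMA nodes */

    rc = do_sysctl(&op);
    if (!rc)
        *bytes = op.u.availheap.avail_bytes;
    return rc;
}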
* * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) #define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) #define TRC_SCHED_DOM_ADD (TRC_SCHED + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED + 2) #define TRC_SCHED_SLEEP (TRC_SCHED + 3) #define TRC_SCHED_WAKE (TRC_SCHED + 4) #define TRC_SCHED_YIELD (TRC_SCHED + 5) #define TRC_SCHED_BLOCK (TRC_SCHED + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7) #define TRC_SCHED_CTL (TRC_SCHED + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED + 9) #define TRC_SCHED_SWITCH (TRC_SCHED + 10) #define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) #define TRC_PV_PAGE_FAULT (TRC_PV + 4) #define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) #define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) #define TRC_PV_EMULATE_4GB (TRC_PV + 7) #define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) #define TRC_PV_PAGING_FIXUP (TRC_PV + 9) #define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) #define TRC_PV_PTWR_EMULATION (TRC_PV + 11) #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) /* Indicates that addresses in trace record are 64 bits */ #define TRC_PV_64_FLAG (0x100) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + 0x03) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL 
(TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) #define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) #define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16) #define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17) #define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + 0x20) /* This structure represents a single trace buffer record. */ struct t_rec { uint32_t event:28; uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ union { struct { uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ uint32_t extra_u32[7]; /* event data items */ } cycles; struct { uint32_t extra_u32[7]; /* event data items */ } nocycles; } u; }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. */ /* Records follow immediately after the meta-data header. */ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/vcpu.h000066400000000000000000000166101314037446600252750ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). 
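/*
 * Illustrative sketch (not part of the Xen interface itself): walking the
 * trace ring described by struct t_buf / struct t_rec above. Records follow
 * the t_buf header and vary in length with cycles_included and extra_u32.
 * This sketch assumes cons/prod are byte offsets into a data area of
 * data_size bytes, that records never straddle the wrap point, and that the
 * buffer has already been mapped (e.g. via XEN_SYSCTL_tbuf_op); all of that
 * is outside the scope of these headers.
 */
#include <string.h>

static void consume_trace_records(struct t_buf *buf, unsigned char *data,
                                  uint32_t data_size,
                                  void (*handle)(const struct t_rec *))
{
    uint32_t cons = buf->cons;

    while (cons != buf->prod) {
        struct t_rec rec;
        uint32_t size;

        /* Fixed 32-bit header: event, extra_u32 count, cycles flag. */
        memcpy(&rec, data + (cons % data_size), sizeof(uint32_t));

        size = sizeof(uint32_t)                                   /* header    */
             + (rec.cycles_included ? 2 * sizeof(uint32_t) : 0)   /* timestamp */
             + rec.extra_u32 * sizeof(uint32_t);                  /* payload   */

        memcpy(&rec, data + (cons % data_size), size);
        handle(&rec);

        cons += size;
    }
    buf->cons = cons;   /* publish how far the tools have consumed */
}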
* There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. */ uint64_t time[4]; }; typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. 
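/*
 * Illustrative sketch (not part of the Xen interface itself): sampling how
 * much of its lifetime a VCPU has actually spent running, using
 * VCPUOP_get_runstate_info and the RUNSTATE_* indices defined above.
 * HYPERVISOR_vcpu_op() follows the prototype documented at the top of this
 * file; the wrapper name is a kernel-specific assumption.
 */
extern int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);

static int vcpu_running_permille(int vcpuid, unsigned int *permille)
{
    struct vcpu_runstate_info ri;
    uint64_t total;
    int rc = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpuid, &ri);

    if (rc)
        return rc;

    total = ri.time[RUNSTATE_running] + ri.time[RUNSTATE_runnable] +
            ri.time[RUNSTATE_blocked] + ri.time[RUNSTATE_offline];

    /* The four buckets sum to total VCPU lifetime, per the comment above. */
    *permille = total ? (unsigned int)((ri.time[RUNSTATE_running] * 1000) / total)
                      : 0;
    return 0;
}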
Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. */ #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 #endif /* __XEN_PUBLIC_VCPU_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/version.h000066400000000000000000000057531314037446600260130ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. 
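/*
 * Illustrative sketch (not part of the Xen interface itself): arming the
 * calling VCPU's single-shot timer with VCPUOP_set_singleshot_timer. The
 * timeout is an absolute system time in nanoseconds, and VCPU_SSHOTTMR_future
 * makes Xen reject deadlines that have already passed. HYPERVISOR_vcpu_op()
 * and the -ETIME errno convention are assumptions borrowed from typical
 * guest kernels.
 */
extern int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);

static int arm_oneshot_timer(int vcpuid, uint64_t now_ns, uint64_t delta_ns)
{
    struct vcpu_set_singleshot_timer single = {
        .timeout_abs_ns = now_ns + delta_ns,
        .flags          = VCPU_SSHOTTMR_future,
    };

    /* A return of -ETIME means the deadline had already passed; retry. */
    return HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpuid, &single);
}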
*/ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xen-compat.h000066400000000000000000000042031314037446600263660ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030207 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif /* Fields defined as a Xen guest handle since 0x00030205. 
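/*
 * Illustrative sketch (not part of the Xen interface itself): decoding the
 * XENVER_version result (major:minor packed 16:16) and fetching the extra
 * version string, per the comments above. HYPERVISOR_xen_version() is assumed
 * to be the guest's wrapper around __HYPERVISOR_xen_version; only the wrapper
 * name is an assumption, the argument conventions come from this header.
 */
#include <stdio.h>

extern int HYPERVISOR_xen_version(int cmd, void *arg);

static void print_xen_version(void)
{
    xen_extraversion_t extra;
    int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

    HYPERVISOR_xen_version(XENVER_extraversion, extra);
    printf("Running on Xen %d.%d%.*s\n",
           ver >> 16, ver & 0xffff, (int)XEN_EXTRAVERSION_LEN, extra);
}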
*/ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 #define XEN_GUEST_HANDLE_00030205(type) XEN_GUEST_HANDLE(type) #else #define XEN_GUEST_HANDLE_00030205(type) type * #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xen.h000066400000000000000000000606431314037446600251170ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #if defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #elif defined(__powerpc__) #include "arch-powerpc.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. 
*/ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. 
*/ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. 
*/ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ XEN_GUEST_HANDLE_00030205(void) vcpumask; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. 
*/ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. 
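/*
 * Illustrative sketch (not part of the Xen interface itself): turning a
 * vcpu_time_info snapshot into the current system time, following the
 * formula and the seqlock-style version protocol documented above. rdtsc()
 * and rmb() are placeholders for the real architecture primitives a guest
 * would use, and production code uses a wider multiply to avoid overflow of
 * the 64-bit intermediate; both simplifications are assumptions of this sketch.
 */
extern uint64_t rdtsc(void);                              /* assumed TSC read  */
#define rmb() __asm__ __volatile__("" ::: "memory")       /* placeholder barrier */

static uint64_t current_system_time_ns(const volatile struct vcpu_time_info *t)
{
    uint32_t version, mul;
    uint64_t system_time, tsc_timestamp, delta;
    int8_t shift;

    do {
        version = t->version;          /* odd => update in progress, retry */
        rmb();
        system_time   = t->system_time;
        tsc_timestamp = t->tsc_timestamp;
        mul           = t->tsc_to_system_mul;
        shift         = t->tsc_shift;
        rmb();
    } while ((version & 1) || (version != t->version));

    delta = rdtsc() - tsc_timestamp;
    if (shift >= 0)
        delta <<= shift;
    else
        delta >>= -shift;

    /* system_time + ((delta * tsc_to_system_mul) >> 32), as documented. */
    return system_time + ((delta * (uint64_t)mul) >> 32);
}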
*/ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". 
*/ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. 
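/*
 * Illustrative sketch (not part of the Xen interface itself): reading the
 * wallclock base from shared_info. wc_sec/wc_nsec give the wall time at
 * system time zero, so a guest adds its current Xen system time (see the
 * vcpu_time_info sketch earlier) to obtain "now". wc_version follows the
 * same even/odd update protocol as vcpu_time_info.version; rmb() is again a
 * placeholder for the real architecture barrier.
 */
extern uint64_t current_system_time_ns(const volatile struct vcpu_time_info *t);
#define rmb() __asm__ __volatile__("" ::: "memory")

static void xen_wallclock(volatile struct shared_info *s,
                          uint64_t *sec, uint64_t *nsec)
{
    uint32_t version;
    uint64_t base_sec, base_nsec, ns;

    do {
        version = s->wc_version;
        rmb();
        base_sec  = s->wc_sec;
        base_nsec = s->wc_nsec;
        rmb();
    } while ((version & 1) || (version != s->wc_version));

    /* Add nanoseconds since boot, kept by the per-VCPU time info. */
    ns = current_system_time_ns(&s->vcpu_info[0].time) + base_nsec;

    *sec  = base_sec + ns / 1000000000ULL;
    *nsec = ns % 1000000000ULL;
}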
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600257610ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xenoprof.h000066400000000000000000000100311314037446600261470ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
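/*
 * Illustrative sketch (editor's example, not part of this tree): filling the
 * xencomm descriptor defined in xencomm.h above for a virtually contiguous,
 * page-aligned buffer, one physical address per page.  Assumes the caller
 * allocated the descriptor with room for the required entries; needs
 * <linux/mm.h> and <asm/io.h> for PAGE_SIZE/PAGE_SHIFT and virt_to_phys().
 */
static void example_xencomm_fill(struct xencomm_desc *desc, void *buf,
                                 unsigned long bytes)
{
    unsigned long i, nr = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

    desc->magic = XENCOMM_MAGIC;
    desc->nr_addrs = nr;
    for (i = 0; i < nr; i++)
        desc->address[i] = virt_to_phys((char *)buf + i * PAGE_SIZE);
}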
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. * Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). */ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 #define XENOPROF_last_op 15 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE ~0UL /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xsm/000077500000000000000000000000001314037446600247525ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xsm/acm.h000066400000000000000000000156641314037446600256770ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to 
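/*
 * Illustrative sketch (editor's example): draining one per-VCPU xenoprof
 * buffer as defined in xenoprof.h above.  It only shows the event_head /
 * event_tail ring convention; the real xenoprofile consumer differs in
 * detail and synchronises with Xen more carefully.
 */
static void example_drain_xenoprof_buf(struct xenoprof_buf *buf,
                                       void (*log_sample)(uint64_t eip,
                                                          uint8_t mode))
{
    uint32_t tail = buf->event_tail;
    const uint32_t head = buf->event_head;

    while (tail != head) {
        struct event_log *ev = &buf->event_log[tail];

        log_sample(ev->eip, ev->mode);
        tail = (tail + 1) % buf->event_size;     /* wrap around the ring */
    }
    buf->event_tail = tail;                      /* publish consumed samples */
}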
whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* if ACM_DEBUG defined, all hooks should * print a short trace message (comment it out * when not in testing mode ) */ /* #define ACM_DEBUG */ #ifdef ACM_DEBUG # define printkd(fmt, args...) printk(fmt,## args) #else # define printkd(fmt, args...) #endif /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 3 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. 
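/*
 * Editor's example: the policy-code encoding described above keeps the
 * primary policy in the low four bits and any secondary policy in the next
 * four, so a combined code can be split like this (uint32_t from <stdint.h>
 * or the equivalent kernel types):
 */
static void example_split_policy_code(uint32_t code,
                                      uint32_t *primary, uint32_t *secondary)
{
    *primary   = code & 0xf;         /* e.g. ACM_CHINESE_WALL_POLICY */
    *secondary = (code >> 4) & 0xf;  /* e.g. ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY */
}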
* If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. */ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xsm/acm_ops.h000066400000000000000000000104741314037446600265520ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
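/*
 * Editor's example: a minimal sanity check of a binary policy blob against
 * the acm_policy_buffer layout defined in acm.h above.  Byte-order
 * conversion, offset alignment and the per-policy sub-buffers are ignored
 * here; the real ACM code checks far more.
 */
static int example_policy_header_ok(const struct acm_policy_buffer *b,
                                    uint32_t buf_len)
{
    if (b->magic != ACM_MAGIC)
        return 0;
    if (b->policy_version != ACM_POLICY_VERSION)
        return 0;
    if (b->len > buf_len ||
        b->primary_buffer_offset > b->len ||
        b->secondary_buffer_offset > b->len)
        return 0;
    return 1;
}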
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/interface/xsm/flask_op.h000066400000000000000000000022601314037446600267210ustar00rootroot00000000000000/* * This file contains the flask_op hypercall commands and definitions. 
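/*
 * Editor's example: how a management tool might fill the xen_acmctl
 * structure from acm_ops.h above to ask whether two domains may share
 * resources.  Issuing the actual hypercall is omitted; needs <string.h>
 * (or the kernel equivalent) for memset().
 */
static void example_prepare_sharing_decision(struct xen_acmctl *ctl,
                                             domaintype_t dom1,
                                             domaintype_t dom2)
{
    memset(ctl, 0, sizeof(*ctl));
    ctl->cmd = ACMOP_getdecision;
    ctl->interface_version = ACM_INTERFACE_VERSION;
    ctl->u.getdecision.get_decision_by1 = ACM_GETBY_domainid;
    ctl->u.getdecision.get_decision_by2 = ACM_GETBY_domainid;
    ctl->u.getdecision.id1.domainid = dom1;
    ctl->u.getdecision.id2.domainid = dom2;
    ctl->u.getdecision.hook = ACMHOOK_sharing;
    /* On return, ctl->u.getdecision.acm_decision is ACM_ACCESS_PERMITTED
     * or ACM_ACCESS_DENIED. */
}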
* * Author: George Coker, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 typedef struct flask_op { int cmd; int size; char *buf; } flask_op_t; DEFINE_XEN_GUEST_HANDLE(flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/pcifront.h000066400000000000000000000033141314037446600242010ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ struct pcifront_device; struct pci_bus; struct pcifront_sd { int domain; struct pcifront_device *pdev; }; static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } #if defined(CONFIG_PCI_DOMAINS) static inline int pci_domain_nr(struct pci_bus *bus) { struct pcifront_sd *sd = bus->sysdata; return sd->domain; } static inline int pci_proc_domain(struct pci_bus *bus) { return pci_domain_nr(bus); } #endif /* CONFIG_PCI_DOMAINS */ static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { } #else /* __ia64__ */ #include #include #define pcifront_sd pci_controller extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { xen_pcibios_setup_root_windows(bus, sd); } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/public/000077500000000000000000000000001314037446600234615ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/public/evtchn.h000066400000000000000000000056351314037446600251320ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. 
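/*
 * Editor's example: the general shape of a flask_op request using the
 * structure defined in flask_op.h above.  Whether @size must include the
 * terminating NUL and how the resulting SID is returned are not specified
 * by that header, so treat those details as assumptions; needs <string.h>.
 */
static void example_fill_context_to_sid(flask_op_t *op, char *context)
{
    op->cmd  = FLASK_CONTEXT_TO_SID;
    op->size = strlen(context) + 1;   /* assumed to include the NUL */
    op->buf  = context;               /* request/reply buffer */
}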
* * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/public/gntdev.h000066400000000000000000000100661314037446600251240ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. 
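/*
 * Editor's example (user space, not part of this tree): binding a VIRQ
 * through /dev/xen/evtchn with the ioctls defined in evtchn.h above and
 * waiting for one event.  Error handling is minimal, and reading the pending
 * port as a 32-bit value is an assumption of this sketch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_wait_for_virq(unsigned int virq)
{
    struct ioctl_evtchn_bind_virq bind = { .virq = virq };
    uint32_t port;
    int fd, rc;

    fd = open("/dev/xen/evtchn", O_RDWR);
    if (fd < 0)
        return -1;

    rc = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);   /* returns the new port */
    if (rc < 0 || read(fd, &port, sizeof(port)) != sizeof(port))
        port = (uint32_t)-1;                         /* bind or wait failed */

    close(fd);                                       /* also unbinds the port */
    return (int)port;
}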
* * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. 
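/*
 * Editor's example (user space, not part of this tree): mapping a single
 * page granted by another domain via /dev/xen/gntdev, using the ioctls
 * defined above.  Assumes 4 KiB pages and abbreviates error handling.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_map_one_grant(uint32_t domid, uint32_t ref, int *fd_out)
{
    struct ioctl_gntdev_map_grant_ref map = {
        .count = 1,
        .refs[0] = { .domid = domid, .ref = ref },
    };
    void *addr;
    int fd = open("/dev/xen/gntdev", O_RDWR);

    if (fd < 0)
        return NULL;
    if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0) {
        close(fd);
        return NULL;
    }
    /* @index returned by the ioctl is the offset to pass to mmap(). */
    addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.index);
    if (addr == MAP_FAILED) {
        close(fd);
        return NULL;
    }
    *fd_out = fd;    /* keep the fd open while the mapping is in use */
    return addr;
}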
*/ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/public/privcmd.h000066400000000000000000000052141314037446600253000ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. 
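/*
 * Editor's example (user space, not part of this tree): issuing a hypercall
 * through /proc/xen/privcmd with IOCTL_PRIVCMD_HYPERCALL (defined just
 * below).  Only privileged domains may do this; the ioctl's return value is
 * the hypercall's own return value, and the hypercall number and arguments
 * are left entirely to the caller.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static long example_privcmd_hypercall(uint64_t op, uint64_t arg1, uint64_t arg2)
{
    privcmd_hypercall_t call = { .op = op, .arg = { arg1, arg2 } };
    long rc;
    int fd = open("/proc/xen/privcmd", O_RDWR);

    if (fd < 0)
        return -1;
    rc = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
    close(fd);
    return rc;
}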
*/ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/xen_proc.h000066400000000000000000000004021314037446600241650ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/xenbus.h000066400000000000000000000255551314037446600236740ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); /* See XBWF_ definitions below. */ unsigned long flags; }; /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 /* A xenbus device. 
*/ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int xenbus_register_frontend(struct xenbus_driver *drv); int xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. 
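/*
 * Editor's example: typical xenstore access with the helpers declared above,
 * reading one node and updating another inside a single transaction and
 * retrying if it aborts.  The node names are placeholders, not keys used by
 * this package; needs <linux/errno.h>.
 */
static int example_update_keys(const char *dir)
{
    struct xenbus_transaction xbt;
    int feature, err;

again:
    err = xenbus_transaction_start(&xbt);
    if (err)
        return err;

    err = xenbus_scanf(xbt, dir, "example-feature", "%d", &feature);
    if (err != 1) {                      /* returns the number of items read */
        xenbus_transaction_end(xbt, 1);  /* abort */
        return err < 0 ? err : -EINVAL;
    }

    err = xenbus_printf(xbt, dir, "example-ack", "%d", feature ? 1 : 0);
    if (err) {
        xenbus_transaction_end(xbt, 1);
        return err;
    }

    err = xenbus_transaction_end(xbt, 0);
    if (err == -EAGAIN)                  /* conflicting update: retry */
        goto again;
    return err;
}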
*/ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. 
On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/xencomm.h000066400000000000000000000050601314037446600240230ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. 
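/*
 * Editor's example: the usual frontend bring-up pattern built from the
 * xenbus helpers declared above: grant the shared ring, allocate an event
 * channel, then advertise both in the store.  virt_to_mfn() and the node
 * names are assumptions of this sketch, as is xenbus_grant_ring() returning
 * the grant reference on success.
 */
static int example_ring_setup(struct xenbus_device *dev, void *ring_page)
{
    int ring_ref, evtchn, err;

    err = xenbus_grant_ring(dev, virt_to_mfn(ring_page));
    if (err < 0)
        return err;
    ring_ref = err;

    err = xenbus_alloc_evtchn(dev, &evtchn);
    if (err)
        return err;

    err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%d", ring_ref);
    if (!err)
        err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel",
                            "%d", evtchn);
    if (!err)
        err = xenbus_switch_state(dev, XenbusStateInitialised);
    return err;
}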
*/ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); #if 0 #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ struct xencomm_mini xc_desc ## _base[(n)] \ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ struct xencomm_mini* xc_desc = &xc_desc ## _base[0]; #else /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini*) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #endif #define xencomm_map_no_alloc(ptr, bytes) \ ({XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc);}) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/xencons.h000066400000000000000000000013271314037446600240340ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ #ifdef CONFIG_XEN_PRIVILEGED_GUEST struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); #else #define dom0_init_screen_info(info) ((void)(info)) #endif #ifdef CONFIG_XEN_CONSOLE void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. */ struct pt_regs; void xencons_rx(char *buf, unsigned len, struct pt_regs *regs); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); #else static inline void xencons_force_flush(void) {} static inline void xencons_resume(void) {} #endif #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/include/xen/xenoprof.h000066400000000000000000000025651314037446600242240ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/version.ini000066400000000000000000000000641314037446600221540ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/000077500000000000000000000000001314037446600222045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/Kbuild000066400000000000000000000002461314037446600233430ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-objs = xen-balloon-objs += balloon.o xen-balloon-objs += sysfs.o UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/Makefile000066400000000000000000000000661314037446600236460ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/balloon.c000066400000000000000000000541331314037446600240040ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #ifndef MODULE extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif extern unsigned long num_physpages; #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ /*modified by linyuliang 00176349 begin */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process,NULL); /*modified by linyuliang 00176349 end */ static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_cold_page(page); #else /* free_cold_page() is not being exported. */ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(bs.target_pages, bs.hard_limit); if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } /*modified by linyuliang 00176349 begin */ unsigned long balloon_minimum_target(void) /*modified by linyuliang 00176349 end */ { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. 
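 *
 * (Editor's aside on balloon_minimum_target() above: for a 1024 MiB guest,
 *  i.e. max_pfn == MB2PAGES(1024), the 512..2048 MiB branch applies, so the
 *  floor is MB2PAGES(104) + MB2PAGES(1024)/8, i.e. 104 MiB + 128 MiB =
 *  232 MiB, consistent with the 168 MiB and 360 MiB values tabulated for
 *  512 MiB and 2048 MiB guests.)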
*/ ClearPageReserved(page); page_zone(page)->present_pages++; init_page_count(page); balloon_free_page(page); } /*modified by linyuliang 00176349 begin */ bs.current_pages += rc; /*modified by linyuliang 00176349 end */ /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages=totalram_pages; out: balloon_unlock(flags); /*modified by linyuliang 00176349 begin */ //return 0; return rc < 0 ? rc : rc != nr_pages; /*modified by linyuliang 00176349 end */ } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn)); page = pfn_to_page(pfn); SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages=totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! 
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; // IPRINTK("b_set_new_tar_bs.target_pages= %lu",bs.target_pages); schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
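 * For example, with 4 KiB pages PAGE_SHIFT is 12, so a memory/target value of
 * 1048576 KiB (1 GiB) becomes 1048576 >> 2 = 262144 pages.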
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); if (bs.hard_limit != ~0UL) len += sprintf(page + len, "%8lu kB\n", PAGES2KB(bs.hard_limit)); else len += sprintf(page + len, " ??? kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN)*/ #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; bs.hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
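 * Page frames above xen_start_info->nr_pages (up to max_pfn) have no backing
 * memory at boot, so they are added to the ballooned list straight away.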
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { /* XXX - release balloon here */ return; } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
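 * so fail this allocation and take the err path below.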
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); balloon_free_page(page); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); if (free_vec) kfree(pagevec); else totalram_pages = bs.current_pages -= nr_pages; schedule_work(&balloon_worker); } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 1); } void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 0); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/common.h000066400000000000000000000044621314037446600236530ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* We may hit the hard limit in Xen. If we do then we remember it. 
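 * A value of ~0UL means no hard limit has been observed yet.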
*/ unsigned long hard_limit; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-balloon/sysfs.c000066400000000000000000000112731314037446600235230ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) \ static ssize_t show_##name(struct sys_device *dev, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(hard_limit_kb, (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n", (bs.hard_limit!=~0UL) ? 
PAGES2KB(bs.hard_limit) : 0); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, const char *buf, size_t count) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strcpy(memstring, buf); target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_hard_limit_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { set_kset_name(BALLOON_CLASS_NAME), }; static struct sys_device balloon_sysdev; static int register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/000077500000000000000000000000001314037446600231535ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/Kbuild000066400000000000000000000012331314037446600243070ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? 
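# (ia64 builds additionally need the xencomm glue objects below.)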
ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/Makefile000066400000000000000000000000661314037446600246150ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/evtchn.c000066400000000000000000000204541314037446600246130ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { unsigned int cpu; shared_info_t *s = shared_info_area; vcpu_info_t *vcpu_info; cpu = get_cpu(); vcpu_info = &s->vcpu_info[cpu]; /* Slow path (hypercall) if this is a non-local port. We only ever bind event channels to vcpu 0 in HVM guests. 
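 * Any other vcpu therefore has to ask the hypervisor (EVTCHNOP_unmask)
 * to unmask the port on its behalf.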
*/ if (unlikely(cpu != 0)) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); put_cpu(); return; } synch_clear_bit(port, &s->evtchn_mask[0]); /* * The following is basically the equivalent of * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the * interrupt edge' if the channel is masked. */ if (synch_test_bit(port, &s->evtchn_pending[0]) && !synch_test_and_set_bit(port / BITS_PER_LONG, &vcpu_info->evtchn_pending_sel)) { vcpu_info->evtchn_upcall_pending = 1; if (!vcpu_info->evtchn_upcall_mask) force_evtchn_callback(); } put_cpu(); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, port; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; /* NB. No need for a barrier here -- XCHG is a barrier on x86. 
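 * The locked exchange orders the clearing of evtchn_upcall_pending above
 * with the read of the pending selector.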
*/ l1 = xchg(&v->evtchn_pending_sel, 0); while (l1 != 0) { l1i = __ffs(l1); l1 &= ~(1 << l1i); while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i])) { port = (l1i * BITS_PER_LONG) + __ffs(l2); synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); } } return IRQ_HANDLED; } void force_evtchn_callback(void) { VOID(HYPERVISOR_xen_version(0, NULL)); } EXPORT_SYMBOL(force_evtchn_callback); void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/features.c000066400000000000000000000014641314037446600251420ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */ EXPORT_SYMBOL(xen_features); void setup_xen_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. 
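 * so the free list built in gnttab_init() starts at NR_RESERVED_ENTRIES.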
*/ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. 
still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. */ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; 
spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->next) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN static seqlock_t gnttab_dma_lock = SEQLOCK_UNLOCKED; #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = 
apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page) { ClearPageForeign(page); gnttab_reset_grant_page(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; /* if (!get_page_unless_zero(page)) */ if (!atomic_add_unless(&page->_count, 1, -1)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? */ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { set_page_count(page, 1); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } int gnttab_suspend(void) { #ifdef CONFIG_X86 apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); #endif return 0; } #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
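 * (mapping the highest-numbered grant frame first makes Xen size the table
 * in one go instead of growing it on every iteration).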
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/machine_reboot.c000066400000000000000000000046321314037446600263020ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; /* * Use a rwlock to protect the hypercall page from being executed in AP context * while the BSP is re-initializing it after restore. */ #if 0 static DEFINE_RWLOCK(suspend_lock); #endif #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
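 * The BSP remaps it in bp_suspend() before the APs are released from
 * this loop.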
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) { cpu_relax(); #if 0 read_lock(&suspend_lock); HYPERVISOR_yield(); read_unlock(&suspend_lock); #endif } mb(); atomic_dec(&info->nr_spinning); } #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { //write_lock(&suspend_lock); platform_pci_resume(); //write_unlock(&suspend_lock); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier_func)(void)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier_func(); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume(); } else xenbus_suspend_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/platform-compat.c000066400000000000000000000067621314037446600264370ustar00rootroot00000000000000#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { kill_proc(1, SIGINT, 1); /* interrupt init */ } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. 
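 * (compatibility helper for kernels older than 2.6.10, which lack it).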
*/ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if defined(CONFIG_SUSE_KERNEL) \ ? LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) \ : LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600272310ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. * Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). 
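 * Note that the flags are already forced on above, so this autodetection
 * only takes effect if that forcing is removed.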
*/ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/platform-pci.c000066400000000000000000000240031314037446600257130ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. 
driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base = 0, eax = 0, ebx = 0, ecx = 0, edx = 0; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0, pages = 0, msr = 0, i = 0, base = 0; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
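 * Instead we build the mapping with __vmalloc() and clear _PAGE_NX by hand
 * so the stub pages stay executable; the original call was: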
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages = 0, msr = 0, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; /* write flag to xenstore when 
xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/platform-pci.h000066400000000000000000000026611314037446600257260ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. */ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/reboot.c000066400000000000000000000141461314037446600246170ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? 
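The flag is filled in by setup_shutdown_watcher() from the control/platform-feature-multiprocessor-suspend xenstore key.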
*/ static int fast_suspend; static void __shutdown_handler(void *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL); int __xen_suspend(int fast_suspend, void (*resume_notifier)(void)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static void xen_resume_notifier(void) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_work(&shutdown_work); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } static void __shutdown_handler(void *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, old_state, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
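On a failed or empty read the transaction is simply aborted and shutting_down is left untouched.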
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } /*պǨʱsuspendʧᵼǨƹȥsuspendʧܺǨƳʱ*/ //xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); if (new_state != SHUTDOWN_INVALID) { old_state = xchg(&shutting_down, new_state); if (old_state == SHUTDOWN_INVALID) schedule_work(&shutdown_work); else BUG_ON(old_state != SHUTDOWN_RESUMING); } kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xen_proc.c000066400000000000000000000010451314037446600251340ustar00rootroot00000000000000 #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry); void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xen_support.c000066400000000000000000000040531314037446600257070ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_backend_client.c000066400000000000000000000106761314037446600276420ustar00rootroot00000000000000/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_client.c000066400000000000000000000172661314037446600261750ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DPRINTK(fmt, args...) \ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. 
Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
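/* As xenbus_dev_error(), but additionally switches the device to Closing. */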
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_comms.c000066400000000000000000000140101314037446600260150ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(void *); extern int xenstored_ready; static DECLARE_WORK(probe_work, xenbus_probe, NULL); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) { if (unlikely(xenstored_ready == 0)) { xenstored_ready = 1; schedule_work(&probe_work); } wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /* Set up interrupt handler off store event channel. 
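wake_waiting() wakes xb_waitq and, on the first event, marks xenstored ready and schedules xenbus_probe().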
*/ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_comms.h000066400000000000000000000035561314037446600260370ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_dev.c000066400000000000000000000231011314037446600254560ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
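Replies are queued as read_buffer chunks on read_buffers under reply_mutex; readers sleep on read_waitq.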
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; body_len = path_len + tok_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_TRANSACTION_START: case XS_TRANSACTION_END: case XS_DIRECTORY: case XS_READ: case XS_GET_PERMS: case XS_RELEASE: case XS_GET_DOMAIN_PATH: case XS_WRITE: case XS_LINUX_WEAK_WRITE: case XS_MKDIR: case XS_RM: case XS_SET_PERMS: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { 
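/* Transaction ended: find the matching entry on this handle's list, unlink it and free it. */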
list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: rc = -EINVAL; break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } static struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_probe.c000066400000000000000000000706741314037446600260300ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef MODULE #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static struct notifier_block *xenstore_chain; static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
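Matching is done purely on the devicetype string (e.g. "vif" or "vbd").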
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; int length = 0, i = 0; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, .dev = { .bus_id = "xen", }, }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
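Events whose path no longer falls under dev->otherend are ignored.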
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = drv->owner; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); 
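/* Registration (and any probes it triggers) ran under xenwatch_mutex so it could not race with watch callbacks; drop the lock now. */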
mutex_unlock(&xenwatch_mutex); return ret; } int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. 
*/ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.parent = &bus->dev; xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto unregister; return 0; unregister: device_remove_file(&xendev->dev, &dev_attr_nodename); device_remove_file(&xendev->dev, &dev_attr_devtype); device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
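fe_watch below delivers any change under the "device" subtree to frontend_changed().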
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ int xenstored_ready = 0; int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (xenstored_ready > 0) ret = nb->notifier_call(nb, 0, NULL); else notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(void *unused) { BUG_ON((xenstored_ready <= 0)); /* Enumerate devices in xenstore and watch for changes. 
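 * xenbus_probe_devices() walks every type directory already present under
 * the bus root and probes each entry; fe_watch is then registered so that
 * later additions and removals arrive via frontend_changed().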
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = 0; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { xenstored_ready = 1; #ifdef CONFIG_XEN xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif } xenbus_dev_init(); /* Initialize the interface to xenstore. 
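 * xs_init() wires up the reply list, the request/response mutexes, the
 * transaction and watch semaphores, and the shared ring to xenstored, and
 * then starts the "xenwatch" and "xenbus" kernel threads.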
*/ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: if (page) free_page(page); /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifdef CONFIG_XEN postcore_initcall(xenbus_probe_init); MODULE_LICENSE("Dual BSD/GPL"); #else int xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_disconnected_device(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_disconnected_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_disconnected_device); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_disconnected_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_probe.h000066400000000000000000000061171314037446600260240ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; struct device dev; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void dev_changed(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_probe_backend.c000066400000000000000000000172621314037446600274710ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) return -ENOSPC; return 0; } static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, }, .dev = { .bus_id = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; struct xenbus_driver *drv; int i = 0; int length = 0; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_BASE_PATH=%s", xenbus_backend.root); /* terminate, set to next free slot, shrink available space */ envp[i] = NULL; envp = &envp[i]; num_envp -= i; buffer = &buffer[length]; buffer_size -= length; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, envp, num_envp, buffer, buffer_size); } return 0; } int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned 
int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-platform-pci/xenbus_xs.c000066400000000000000000000511471314037446600253450ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct rw_semaphore transaction_mutex; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
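 * (The caller sleeps on reply_waitq until process_msg() queues the reply on
 * xs_state.reply_list; request_mutex is held across the whole exchange, so
 * at most one reply is ever expected here.)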
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) down_read(&xs_state.transaction_mutex); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) up_read(&xs_state.transaction_mutex); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; /* Must stay consistent with the xsd_sockmsg_type values defined in xs_wire.h. */ #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_KERNEL|__GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len); /* Transfer to one big alloc for easy freeing.
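 * The single buffer holds an array of *num char pointers followed by the
 * NUL-separated string bodies; each ret[i] is pointed into that tail, so
 * one kfree(ret) by the caller releases everything.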
*/ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL|__GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; down_read(&xs_state.transaction_mutex); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { up_read(&xs_state.transaction_mutex); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. 
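 *
 * A typical caller (a sketch only, mirroring talk_to_backend() in the
 * blkfront source later in this tree) retries the whole transaction when
 * the store reports a conflict:
 *
 *   again:
 *       err = xenbus_transaction_start(&xbt);
 *       ...reads and writes under xbt...
 *       err = xenbus_transaction_end(xbt, 0);
 *       if (err == -EAGAIN)
 *           goto again;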
*/ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); up_read(&xs_state.transaction_mutex); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL|__GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. 
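 * (The token is just the watch pointer rendered in hex, so -EEXIST from
 * xenstored means this exact watch is already registered; in that case the
 * entry is left on the local watches list rather than being unlinked.)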
*/ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; BUG_ON(watch->flags & XBWF_new_thread); sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (current->pid != xenwatch_pid) { mutex_lock(&xenwatch_mutex); mutex_unlock(&xenwatch_mutex); } } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { down_write(&xs_state.transaction_mutex); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.transaction_mutex); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); up_write(&xs_state.transaction_mutex); } static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. 
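 * (The reader below takes response_mutex before consuming anything and
 * holds it until the whole header and body are in; xs_suspend() grabs the
 * same mutex, so a suspend cannot slip in half way through a message.)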
* A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_KERNEL|__GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_KERNEL|__GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { int err; struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); init_rwsem(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) return err; task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/000077500000000000000000000000001314037446600213315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/Kbuild000066400000000000000000000001151314037446600224630ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/Makefile000066400000000000000000000000661314037446600227730ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/blkfront.c000066400000000000000000001343341314037446600233260ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include "md.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static void blkif_restart_queue(void *arg); static int blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *, struct blkfront_info *, struct blkif_response *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. 
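 * The probe reads the "device-type" node back from xenstore; a value of
 * "cdrom" makes it return -ENXIO so the emulated CD drive stays with the
 * native cdrom driver instead of being claimed by blkfront.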
*/ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; /* for persistent grant */ info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue, (void *)info); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. 
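 * ((256 + 1) * 256 = 65792 pages; assuming the usual 4 KiB page size that
 * works out to about 257 MiB, which is where the figure above comes from.)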
*/ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); /*info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL); if (info->sg == NULL) goto out_of_memory; memset(info->sg, 0, sizeof(info->sg));*/ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ //sg_init_table(info->shadow[i].sg, segs); memset(info->shadow[i].sg, 0, sizeof(info->shadow[i].sg[0]) * segs); } return 0; out_of_memory: //kfree(info->sg); //info->sg = NULL; for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. 
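 * (offset and size arrive in 512-byte sectors, matching bi_sector; both are
 * shifted left by 9 to bytes before the bio_vec array is walked and clipped.)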
* This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio,split_bio->bio->bi_size, split_bio->err); kfree(split_bio); } bio_put(bio); } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_entry((&info->grants)->next, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. 
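 * talk_to_backend() negotiates the ring size against the backend's
 * max-ring-pages/max-ring-page-order keys, allocates and grants the ring
 * pages, then publishes ring-ref*, event-channel, protocol and
 * feature-persistent in a single xenbus transaction.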
*/ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) printk("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ffs(ring_size - 1) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) printk("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ffs(ring_size - 1); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U << ring_order, ring_order); info->ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) { goto abort_transaction; } } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) { goto abort_transaction; } what = "protocol"; err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { goto abort_transaction; } /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; int i; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; /*info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM);*/ info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH);
        if (!info->ring_pages[nr])
            break;
    }

    sring = nr == info->ring_size
            ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL;
    if (!sring) {
        while (nr--)
            __free_page(info->ring_pages[nr]);
        xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
        return -ENOMEM;
    }
    SHARED_RING_INIT(sring);
    FRONT_RING_INIT(&info->ring, sring,
                    (unsigned long)info->ring_size << PAGE_SHIFT);

    for (i = 0; i < info->ring_size; i++) {
        err = xenbus_grant_ring(dev,
                                pfn_to_mfn(page_to_pfn(info->ring_pages[i])));
        if (err < 0)
            goto fail;
        info->ring_refs[i] = err;
    }

    err = bind_listening_port_to_irqhandler(
        dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
    if (err <= 0) {
        xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
        goto fail;
    }
    info->irq = err;

    return 0;
fail:
    blkif_free(info, 0);
    return err;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
    struct blkfront_info *info = dev->dev.driver_data;
    struct block_device *bd;

    DPRINTK("blkfront:backend_changed.\n");

    switch (backend_state) {
    case XenbusStateInitialising:
    case XenbusStateInitialised:
    case XenbusStateUnknown:
    case XenbusStateClosed:
        break;

    case XenbusStateInitWait:
        if (talk_to_backend(dev, info)) {
            dev_set_drvdata(&dev->dev, NULL);
            kfree(info);
        }
        break;

    case XenbusStateConnected:
        connect(info);
        break;

    case XenbusStateClosing:
        bd = bdget(info->dev);
        if (bd == NULL)
            xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        down(&bd->bd_sem);
#else
        mutex_lock(&bd->bd_mutex);
#endif
        if (info->users > 0)
            xenbus_dev_error(dev, -EBUSY,
                             "Device in use; refusing to close");
        else
            blkfront_closing(dev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
        up(&bd->bd_sem);
#else
        mutex_unlock(&bd->bd_mutex);
#endif
        bdput(bd);
        break;
    }
}

/* ** Connection ** */

/*
 * Invoked when the backend is finally 'ready' (and has told us the
 * details about the physical device - #sectors, size, etc).
 */
static void connect(struct blkfront_info *info)
{
    unsigned long long sectors;
    unsigned long sector_size;
    unsigned int binfo;
    int err;
    /* for persistent grant */
    int persistent;

    switch (info->connected) {
    case BLKIF_STATE_CONNECTED:
        /*
         * Potentially, the back-end may be signalling
         * a capacity change; update the capacity.
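         * Only the capacity is refreshed on this path; the rest of
         * connect() (reading the backend fields, registering the disk)
         * already ran when the device first became connected.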
         */
        err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
                           "sectors", "%Lu", &sectors);
        if (XENBUS_EXIST_ERR(err))
            return;
        printk(KERN_INFO "Setting capacity to %Lu\n", sectors);
        set_capacity(info->gd, sectors);
        //revalidate_disk(info->gd);

        /* fall through */
    case BLKIF_STATE_SUSPENDED:
        //blkif_recover(info);
        return;
    }

    DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);

    err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "sectors", "%Lu", &sectors,
                        "info", "%u", &binfo,
                        "sector-size", "%lu", &sector_size,
                        NULL);
    if (err) {
        xenbus_dev_fatal(info->xbdev, err,
                         "reading backend fields at %s",
                         info->xbdev->otherend);
        return;
    }

    err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "feature-barrier", "%lu", &info->feature_barrier,
                        NULL);
    if (err)
        info->feature_barrier = 0;

    err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                        "feature-persistent", "%u", &persistent,
                        NULL);
    if (err)
        info->feature_persistent = 0;
    else
        info->feature_persistent = persistent;

    err = blkfront_setup_indirect(info);
    if (err) {
        xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                         info->xbdev->otherend);
        return;
    }

    err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
    if (err) {
        xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                         info->xbdev->otherend);
        return;
    }

    (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);

    /* Kick pending requests. */
    spin_lock_irq(&blkif_io_lock);
    info->connected = BLKIF_STATE_CONNECTED;
    kick_pending_request_queues(info);
    spin_unlock_irq(&blkif_io_lock);

    add_disk(info->gd);

    info->is_ready = 1;

    if (strstr(info->xbdev->nodename, "vbd")) {
        printk(KERN_ERR "%s is connected\n", info->xbdev->nodename);
        write_frontend_state_flag(info->xbdev->nodename);
    }
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
    struct blkfront_info *info = dev->dev.driver_data;
    unsigned long flags;

    DPRINTK("blkfront_closing: %s removed\n", dev->nodename);

    if (info->rq == NULL)
        goto out;

    spin_lock_irqsave(&blkif_io_lock, flags);
    /* No more blkif_request(). */
    blk_stop_queue(info->rq);
    /* No more gnttab callback work. */
    gnttab_cancel_free_callback(&info->callback);
    spin_unlock_irqrestore(&blkif_io_lock, flags);

    /* Flush gnttab callback work. Must be done with no locks held. */
    flush_scheduled_work();

    xlvbd_del(info);

out:
    xenbus_frontend_closed(dev);
}

static int blkfront_remove(struct xenbus_device *dev)
{
    struct blkfront_info *info = dev->dev.driver_data;

    DPRINTK("blkfront_remove: %s removed\n", dev->nodename);

    blkif_free(info, 0);
    kfree(info);

    return 0;
}

static inline int GET_ID_FROM_FREELIST(struct blkfront_info *info)
{
    unsigned long free = info->shadow_free;
    BUG_ON(free >= RING_SIZE(&info->ring));
    info->shadow_free = info->shadow[free].req.u.rw.id;
    info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
    return free;
}

static inline void ADD_ID_TO_FREELIST(struct blkfront_info *info,
                                      unsigned long id)
{
    info->shadow[id].req.u.rw.id = info->shadow_free;
    info->shadow[id].request = NULL;
    info->shadow_free = id;
}

static inline void flush_requests(struct blkfront_info *info)
{
    int notify;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

    if (notify)
        notify_remote_via_irq(info->irq);
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
    if (!RING_FULL(&info->ring)) {
        /* Re-enable calldowns.
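         * The queue is stopped in do_blkif_request() once the ring
         * fills up, so only restart it when there is room again.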
*/ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. */ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; struct blkif_request_segment_aligned *segments = NULL; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. 
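 * When indirect descriptors are enabled, max_grefs below also has to
 * cover the grant references needed for the indirect pages themselves
 * (INDIRECT_GREFS() extra entries on top of the data segments).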
*/ int new_persistent_gnts; grant_ref_t gref_head; struct scatterlist *sg; struct grant *gnt_list_entry = NULL; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->flags & (REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)req->sector; ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)req->sector; ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->u.rw.nr_segments = nseg; } for (i = 0; i < nseg; ++i) { sg = info->shadow[id].sg + i; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long pfn = 0UL; if (segments) kunmap_atomic(segments, KM_USER0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_entry((&info->indirect_pages)->next, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg->page), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. 
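 * The copy trades some CPU for fewer grant-table operations: the
 * backend keeps these grants mapped across requests, so write data
 * has to be placed into the already-granted page rather than granting
 * the bio's own page for every request.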
*/ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg->page, KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg->page, KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg->page, KM_USER0); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } if (segments) kunmap_atomic(segments, KM_USER0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
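 * (The read barrier pairs with the write barrier the backend issues
 * before advancing rsp_prod, so the response entries themselves are
 * visible before they are consumed below.)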
*/ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; for (i = 0; i < info->ring_size; i++) { if (info->ring_refs[i] != GRANT_INVALID_REF) { /*gnttab_end_foreign_access(info->ring_refs[i], (unsigned long)mfn_to_virt(pfn_to_mfn(page_to_pfn(info->ring_pages[i]))));*/ gnttab_end_foreign_access(info->ring_refs[i],0UL); if(info->ring_pages[i]) { __free_page(info->ring_pages[i]); info->ring_pages[i] = NULL; } info->ring_refs[i] = GRANT_INVALID_REF; } } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ //for_each_sg(s->sg, sg, nseg, i) { for (i = 0; i < nseg; ++i) { sg = s->sg + i; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg->page, KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg->page, KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg->page, KM_USER0); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. 
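 * Grants the backend still holds mapped are the cheapest ones to hand
 * out again (no new mapping is needed on the backend side), which is
 * why they go to the front of info->grants.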
*/ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static int blkif_recover(struct blkfront_info *info) { int i; // struct blkif_request *req; struct request *req, *n; struct blk_shadow *copy; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int segs, offset; int pending, size; struct split_bio *split_bio; struct list_head requests; //printk("[INTO]----->blkif_recover\n"); /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); if (!copy) return -ENOMEM; memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { kfree(copy); return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_phys_segments(info->rq, segs); blk_queue_max_hw_segments(info->rq, segs); bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < RING_SIZE(&info->ring); i++) { /* Not in use? */ if (!copy[i].request) continue; merge_bio.head = copy[i].request->bio; merge_bio.tail = copy[i].request->biotail; bio_list_merge(&bio_list, &merge_bio); copy[i].request->bio = NULL; blk_put_request(copy[i].request); } kfree(copy); /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. 
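 * After a migration/resume the re-negotiated backend may allow fewer
 * segments per request (for example, no indirect descriptors any more),
 * so the pending requests are broken down into their bios here and any
 * bio that is now too large is split further below before resubmission.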
*/ spin_lock_irq(&blkif_io_lock); //TODO while ((req = blk_fetch_request(info->rq)) != NULL) { while ((req = elv_next_request(info->rq)) != NULL) { //printk("[blkif_recover] while req:%p\n", req); blkdev_dequeue_request(req); merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->flags & (REQ_FUA)) printk("diskcache flush request found!\n"); __blk_put_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ //flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. */ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/block.h000066400000000000000000000134621314037446600226020ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 256 (1M))"); //#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. 
We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned long shadow_free; int feature_barrier; unsigned int max_indirect_segments; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/dm-bio-list.h000066400000000000000000000020461314037446600236240ustar00rootroot00000000000000/* * Copyright (C) 2004 Red Hat UK Ltd. * * This file is released under the GPL. */ #ifndef DM_BIO_LIST_H #define DM_BIO_LIST_H #include struct bio_list { struct bio *head; struct bio *tail; }; static inline void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; } static inline void bio_list_add(struct bio_list *bl, struct bio *bio) { bio->bi_next = NULL; if (bl->tail) bl->tail->bi_next = bio; else bl->head = bio; bl->tail = bio; } static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->tail) bl->tail->bi_next = bl2->head; else bl->head = bl2->head; bl->tail = bl2->tail; } static inline struct bio *bio_list_pop(struct bio_list *bl) { struct bio *bio = bl->head; if (bio) { bl->head = bl->head->bi_next; if (!bl->head) bl->tail = NULL; bio->bi_next = NULL; } return bio; } static inline struct bio *bio_list_get(struct bio_list *bl) { struct bio *bio = bl->head; bl->head = bl->tail = NULL; return bio; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/md.h000066400000000000000000000066451314037446600221150ustar00rootroot00000000000000/* md.h : Multiple Devices driver for Linux Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman Copyright (C) 1994-96 Marc ZYNGIER or This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _MD_H #define _MD_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * 'md_p.h' holds the 'physical' layout of RAID devices * 'md_u.h' holds the user <=> kernel API * * 'md_k.h' holds kernel internal definitions */ #include #include #include "md_k.h" #ifdef CONFIG_MD /* * Different major versions are not compatible. * Different minor versions are only downward compatible. * Different patchlevel versions are downward and upward compatible. */ #define MD_MAJOR_VERSION 0 #define MD_MINOR_VERSION 90 /* * MD_PATCHLEVEL_VERSION indicates kernel functionality. * >=1 means different superblock formats are selectable using SET_ARRAY_INFO * and major_version/minor_version accordingly * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT * in the super status byte * >=3 means that bitmap superblock version 4 is supported, which uses * little-ending representation rather than host-endian */ #define MD_PATCHLEVEL_VERSION 3 extern int mdp_major; extern int register_md_personality (struct mdk_personality *p); extern int unregister_md_personality (struct mdk_personality *p); extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); extern void md_unregister_thread (mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); extern void md_write_start(mddev_t *mddev, struct bio *bi); extern void md_write_end(mddev_t *mddev); extern void md_handle_safemode(mddev_t *mddev); extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev); extern void md_unplug_mddev(mddev_t *mddev); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct page *page, int rw); extern void md_do_sync(mddev_t *mddev); extern void md_new_event(mddev_t *mddev); extern void md_allow_write(mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); #endif /* CONFIG_MD */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/md_k.h000066400000000000000000000261671314037446600224300ustar00rootroot00000000000000/* md_k.h : kernel internal structure of the Linux MD driver Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _MD_K_H #define _MD_K_H /* and dm-bio-list.h is not under include/linux because.... ??? */ #include "dm-bio-list.h" #ifdef CONFIG_BLOCK #define LEVEL_MULTIPATH (-4) #define LEVEL_LINEAR (-1) #define LEVEL_FAULTY (-5) /* we need a value for 'no level specified' and 0 * means 'raid0', so we need something else. 
This is * for internal use only */ #define LEVEL_NONE (-1000000) #define MaxSector (~(sector_t)0) typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; /* * options passed in raidrun: */ /* Currently this must fit in an 'int' */ #define MAX_CHUNK_SIZE (1<<30) /* * MD's 'extended' device */ struct mdk_rdev_s { struct list_head same_set; /* RAID devices within the same set */ sector_t size; /* Device size (in blocks) */ mddev_t *mddev; /* RAID array if running */ long last_events; /* IO event timestamp */ struct block_device *bdev; /* block device handle */ struct page *sb_page; int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ sector_t sb_offset; int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ struct kobject kobj; /* A device can be in one of three states based on two flags: * Not working: faulty==1 in_sync==0 * Fully working: faulty==0 in_sync==1 * Working, but not * in sync with array * faulty==0 in_sync==0 * * It can never have faulty==1, in_sync==1 * This reduces the burden of testing multiple flags in many cases */ unsigned long flags; #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ #define AllReserved 6 /* If whole device is reserved for * one array */ #define AutoDetected 7 /* added by auto-detect */ #define Blocked 8 /* An error occured on an externally * managed array, don't allow writes * until it is cleared */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ int saved_raid_disk; /* role that device used to have in the * array and could again if we did a partial * resync from the bitmap */ sector_t recovery_offset;/* If this device has been partially * recovered, this is where we were * up to. */ atomic_t nr_pending; /* number of pending requests. * only maintained for arrays that * support hot removal */ atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ atomic_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. */ struct work_struct del_work; /* used for delayed sysfs removal */ }; struct mddev_s { void *private; struct mdk_personality *pers; dev_t unit; int md_minor; struct list_head disks; unsigned long flags; #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ int ro; struct gendisk *gendisk; struct kobject kobj; /* Superblock information */ int major_version, minor_version, patch_version; int persistent; int external; /* metadata is * managed externally */ char metadata_type[17]; /* externally set*/ int chunk_size; time_t ctime, utime; int level, layout; char clevel[16]; int raid_disks; int max_disks; sector_t size; /* used size of component devices */ sector_t array_size; /* exported array size */ __u64 events; char uuid[16]; /* If the array is being reshaped, we need to record the * new shape and an indication of where we are up to. * This is written to the superblock. * If reshape_position is MaxSector, then no reshape is happening (yet). 
*/ sector_t reshape_position; int delta_disks, new_level, new_layout, new_chunk; struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ unsigned long resync_mark; /* a recent timestamp */ sector_t resync_mark_cnt;/* blocks written at resync_mark */ sector_t curr_mark_cnt; /* blocks scheduled now */ sector_t resync_max_sectors; /* may be set by personality */ sector_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ /* allow user-space to request suspension of IO to regions of the array */ sector_t suspend_lo; sector_t suspend_hi; /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; /* resync even though the same disks are shared among md-devices */ int parallel_resync; int ok_start_degraded; /* recovery/resync flags * NEEDED: we might need to start a resync/recover * RUNNING: a thread is running, or about to be started * SYNC: actually doing a resync, not a recovery * INTR: resync needs to be aborted for some reason * DONE: thread is done and is waiting to be reaped * REQUEST: user-space has requested a sync (used with SYNC) * CHECK: user-space request for for check-only, no repair * RESHAPE: A reshape is happening * * If neither SYNC or RESHAPE are set, then it is a recovery. */ #define MD_RECOVERY_RUNNING 0 #define MD_RECOVERY_SYNC 1 #define MD_RECOVERY_INTR 3 #define MD_RECOVERY_DONE 4 #define MD_RECOVERY_NEEDED 5 #define MD_RECOVERY_REQUESTED 6 #define MD_RECOVERY_CHECK 7 #define MD_RECOVERY_RESHAPE 8 #define MD_RECOVERY_FROZEN 9 unsigned long recovery; int in_sync; /* know to not need resync */ struct mutex reconfig_mutex; atomic_t active; int changed; /* true if we might need to reread partition info */ int degraded; /* whether md should consider * adding a spare */ int barriers_work; /* initialised to true, cleared as soon * as a barrier request to slave * fails. Only supported */ struct bio *biolist; /* bios that need to be retried * because BIO_RW_BARRIER is not supported */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; sector_t resync_max; /* resync should pause * when it gets here */ spinlock_t write_lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ atomic_t pending_writes; /* number of active superblock writes */ unsigned int safemode; /* if set, update "clean" superblock * when no writes pending. */ unsigned int safemode_delay; struct timer_list safemode_timer; atomic_t writes_pending; struct request_queue *queue; /* for plugging ... */ atomic_t write_behind; /* outstanding async IO */ unsigned int max_write_behind; /* 0 = sync */ struct bitmap *bitmap; /* the bitmap for the device */ struct file *bitmap_file; /* the bitmap file */ long bitmap_offset; /* offset from superblock of * start of bitmap. May be * negative, but not '0' */ long default_bitmap_offset; /* this is the offset to use when * hot-adding a bitmap. It should * eventually be settable by sysfs. 
*/ struct list_head all_mddevs; }; static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) { int faulty = test_bit(Faulty, &rdev->flags); if (atomic_dec_and_test(&rdev->nr_pending) && faulty) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } struct mdk_personality { char *name; int level; struct list_head list; struct module *owner; int (*make_request)(struct request_queue *q, struct bio *bio); int (*run)(mddev_t *mddev); int (*stop)(mddev_t *mddev); void (*status)(struct seq_file *seq, mddev_t *mddev); /* error_handler must set ->faulty and clear ->in_sync * if appropriate, and should abort recovery if needed */ void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_remove_disk) (mddev_t *mddev, int number); int (*spare_active) (mddev_t *mddev); sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); int (*resize) (mddev_t *mddev, sector_t sectors); int (*check_reshape) (mddev_t *mddev); int (*start_reshape) (mddev_t *mddev); int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); /* quiesce moves between quiescence states * 0 - fully active * 1 - no new requests allowed * others - reserved */ void (*quiesce) (mddev_t *mddev, int state); }; struct md_sysfs_entry { struct attribute attr; ssize_t (*show)(mddev_t *, char *); ssize_t (*store)(mddev_t *, const char *, size_t); }; static inline char * mdname (mddev_t * mddev) { return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; } /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. */ #define rdev_for_each_list(rdev, tmp, list) \ \ for ((tmp) = (list).next; \ (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ (tmp) = (tmp)->next, (tmp)->prev != &(list) \ ; ) /* * iterates through the 'same array disks' ringlist */ #define rdev_for_each(rdev, tmp, mddev) \ rdev_for_each_list(rdev, tmp, (mddev)->disks) typedef struct mdk_thread_s { void (*run) (mddev_t *mddev); mddev_t *mddev; wait_queue_head_t wqueue; unsigned long flags; struct task_struct *tsk; unsigned long timeout; } mdk_thread_t; #define THREAD_WAKEUP 0 #define __wait_event_lock_irq(wq, condition, lock, cmd) \ do { \ wait_queue_t __wait; \ init_waitqueue_entry(&__wait, current); \ \ add_wait_queue(&wq, &__wait); \ for (;;) { \ set_current_state(TASK_UNINTERRUPTIBLE); \ if (condition) \ break; \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock); \ } \ current->state = TASK_RUNNING; \ remove_wait_queue(&wq, &__wait); \ } while (0) #define wait_event_lock_irq(wq, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq, condition, lock, cmd); \ } while (0) static inline void safe_put_page(struct page *p) { if (p) put_page(p); } #endif /* CONFIG_BLOCK */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vbd/vbd.c000066400000000000000000000245431314037446600222600ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "block.h" #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) /*llj: the major of vitual_device_ext */ #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... 
SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, unsigned int segments) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, segments); blk_queue_max_hw_segments(rq, segments); printk("init queue set max_hw_sectors %u max_segments %u\n", segments*8, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); printk("Set blk queue %d\n", info->max_indirect_segments ? :BLKIF_MAX_SEGMENTS_PER_REQUEST); if (xlvbd_init_blk_queue(gd, sector_size, info->max_indirect_segments ? 
: BLKIF_MAX_SEGMENTS_PER_REQUEST)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/000077500000000000000000000000001314037446600215205ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/Kbuild000066400000000000000000000002161314037446600226540ustar00rootroot00000000000000include $(M)/config.mk CFLAGS_accel.o := -include asm/hypervisor.h obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/Makefile000066400000000000000000000000661314037446600231620ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/accel.c000066400000000000000000000530621314037446600227410ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Lock to protect access to accelerators_list */ static spinlock_t accelerators_lock; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); spin_lock_init(&accelerators_lock); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; unsigned long flags; spin_lock_irqsave(&accelerators_lock, flags); list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } spin_unlock_irqrestore(&accelerators_lock, flags); } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); schedule_work(&vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* Check we're not trying to overwrite an existing watch */ BUG_ON(np->accel_vif_state.accel_watch.node != NULL); /* Get a watch on the accelerator plugin */ err = 
xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; flush_scheduled_work(); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { unsigned long flags; /* Need lock to write list */ spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } /* * Initialise the state to track an accelerator plugin module. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); unsigned long flags; int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; spin_lock_irqsave(&accelerators_lock, flags); list_add(&accelerator->link, &accelerators_list); spin_unlock_irqrestore(&accelerators_lock, flags); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. 
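* The receive and transmit paths are quiesced first (netif_poll_disable() plus netif_tx_lock_bh()) so that no packet processing can observe a partially updated hooks pointer.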
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { /* This function must be called with the vif_states_lock held */ DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ netif_poll_disable(vif_state->np->netdev); netif_tx_lock_bh(vif_state->np->netdev); vif_state->hooks = vif_state->np->accelerator->hooks; netif_tx_unlock_bh(vif_state->np->netdev); netif_poll_enable(vif_state->np->netdev); } static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; unsigned long flags; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks) { if (hooks->new_device(np->netdev, dev) == 0) { spin_lock_irqsave (&accelerator->vif_states_lock, flags); accelerator_set_vif_state_hooks(&np->accel_vif_state); spin_unlock_irqrestore (&accelerator->vif_states_lock, flags); } } return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. Must be called with accelerator_mutex held */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* * currently hold accelerator_mutex, so don't need * vif_states_lock to read the list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) { spin_lock_irqsave (&accelerator->vif_states_lock, flags); accelerator_set_vif_state_hooks(vif_state); spin_unlock_irqrestore (&accelerator->vif_states_lock, flags); } } } /* * Called by the netfront accelerator plugin module when it has loaded */ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. 
They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { /* Make sure there are no data path operations going on */ netif_poll_disable(vif_state->np->netdev); netif_tx_lock_bh(vif_state->np->netdev); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; netif_tx_unlock_bh(vif_state->np->netdev); netif_poll_enable(vif_state->np->netdev); } /* * Safely remove the accelerator function hooks from a netfront state. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held so don't need vif_states_lock to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if(vif_state->hooks) { hooks = vif_state->hooks; /* Last chance to get statistics from the accelerator */ hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } else { spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. 
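* Locking: accelerator_mutex serialises this against concurrent loads, suspends and removes, while accelerators_lock only guards the walk of accelerators_list.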
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; unsigned long flags; mutex_lock(&accelerator_mutex); spin_lock_irqsave(&accelerators_lock, flags); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { spin_unlock_irqrestore(&accelerators_lock, flags); accelerator_remove_hooks(accelerator); goto out; } } spin_unlock_irqrestore(&accelerators_lock, flags); out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* Helper for call_remove and do_suspend */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev, unsigned long *lock_flags) { struct netfront_accelerator *accelerator = np->accelerator; struct netfront_accel_hooks *hooks; int rc = 0; if (np->accel_vif_state.hooks) { hooks = np->accel_vif_state.hooks; /* Last chance to get statistics from the accelerator */ hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, *lock_flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); spin_lock_irqsave(&accelerator->vif_states_lock, *lock_flags); } return rc; } static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; unsigned long flags; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev, &flags); np->accelerator = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); return rc; } int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { unsigned long flags; int rc = 0; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. 
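* A later suspend_cancel merely re-adds the xenbus watch (see netfront_accelerator_suspend_cancel()); the watch then fires, re-probes the accelerator and re-attaches this vif.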
*/ spin_lock_irqsave(&np->accelerator->vif_states_lock, flags); rc = do_remove(np, dev, &flags); spin_unlock_irqrestore(&np->accelerator->vif_states_lock, flags); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { /* * Setting the watch will cause it to fire and probe the * accelerator, so no need to call accelerator_probe_new_vif() * directly here */ netfront_accelerator_add_watch(np); return 0; } void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; spinlock_t *vif_states_lock; unsigned long flags; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); vif_states_lock = &np->accelerator->vif_states_lock; spin_lock_irqsave(vif_states_lock, flags); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; spin_unlock_irqrestore(vif_states_lock, flags); break; } } out: mutex_unlock(&accelerator_mutex); return; } int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/netfront.c000066400000000000000000001623301314037446600235300ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. 
* * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. 
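* On such kernels HAVE_CSUM_OFFLOAD stays 0, which talk_to_backend() advertises by writing feature-no-csum-offload=1 to xenstore.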
*/ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_HW)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include #include static void remove_nic8139() { struct pci_dev *pci_dev_nic8139 = NULL; //do { pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* check kernel version */ } //} while (pci_dev_nic8139); # comment off because fedora12 will crash return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; // remove 8139 nic when xen nic devices appear remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } /* Create shared ring, alloc event channel. 
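* setup_device() allocates the tx/rx shared rings, grants them to the backend and binds the event channel; the resulting ring-refs and event-channel port are then published to xenstore in the transaction below.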
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL|__GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL|__GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
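* InitWait: connect the rings via network_connect() and switch the frontend to Connected. Connected: send a gratuitous ARP request and reply so switches relearn our MAC. Closing: mark the frontend closed. Other states need no action.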
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // create GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); } } spin_unlock_bh(&np->rx_lock); network_maybe_wake_tx(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'.
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
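* rx_target is doubled here (capped at rx_max_target) whenever the ring came close to running dry; netif_poll() later walks it back down linearly towards rx_min_target when responses arrive in small batches.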
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the balloon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (i--) BUG_ON(np->rx_mcl[i].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests.
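* RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() below publishes the new req_prod and reports whether the backend must be kicked via the event channel.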
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return 0; } frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, 
rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed.
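* Setting SKB_GSO_DODGY should make the stack treat this GSO metadata as untrusted and re-verify it before segmenting; gso_segs is left at 0 below so it is recomputed.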
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct net_device *dev, int *pbudget) { struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, budget, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); if ((budget = *pbudget) > dev->quota) budget = dev->quota; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximate the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustment, our * achievable receive throughput using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */
*/ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } while ((skb = __skb_dequeue(&errq))) kfree_skb(skb); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Modified by UVP: resolve ping problem */ memset(skb->cb, 0, 48); /*** End modified by UVP ****************/ /* Pass it up. */ netif_receive_skb(skb); dev->last_rx = jiffies; } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } *pbudget -= work_done; dev->quota -= work_done; if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __netif_rx_complete(dev); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return more_to_do | accel_more_to_do; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = 
np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } while ((skb = __skb_dequeue(&free_list)) != NULL) dev_kfree_skb(skb); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct class_device *cd, const char *buf, size_t len) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf) { struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_target); } static const struct class_device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = class_device_create_file(&netdev->class_dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) class_device_remove_file(&netdev->class_dev, 
&xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); } } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->poll = netif_poll; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->change_mtu = xennet_change_mtu; netdev->weight = 64; netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
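 * Carrier is cleared while holding both ring locks so that concurrent TX/RX paths observe the disconnect before the irq and shared rings are torn down below.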
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif if (is_initial_xendomain()) return 0; netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif return xenbus_register_frontend(&netfront); } module_init(netif_init); static void __exit netif_exit(void) { if (is_initial_xendomain()) return; #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif netif_exit_accel(); return xenbus_unregister_driver(&netfront); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/SLES10/xen-vnif/netfront.h000066400000000000000000000207561314037446600235420ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/000077500000000000000000000000001314037446600205025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/Makefile000066400000000000000000000017611314037446600221470ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Modify : songjiangtao 00202546 ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) include $(M)/config.mk COMPILEARGS = $(CROSSCOMPILE) obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all: make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) rm -rf Module.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/000077500000000000000000000000001314037446600234065ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/asm-generic/000077500000000000000000000000001314037446600256005ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/asm-generic/pgtable-nopmd.h000066400000000000000000000005611314037446600305040ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/asm-generic/pgtable-nopud.h000066400000000000000000000006211314037446600305110ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ 
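/*
 * Illustrative sketch only (not part of the original archive): how an
 * acceleration plugin module might register itself with netfront through the
 * hook table declared in netfront.h earlier in this archive. The
 * "example_accel" name and the stub hook bodies are assumptions made for
 * illustration. Netfront only checks that the hook table pointer is set
 * before calling individual entries, so a real plugin must populate the
 * whole table with working implementations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include "netfront.h"

static int example_accel_new_device(struct net_device *net_dev,
				    struct xenbus_device *dev)
{
	/* Decide whether this VIF can be accelerated; 0 accepts it. */
	return 0;
}

static int example_accel_remove(struct xenbus_device *dev)
{
	/* Undo whatever new_device() set up for this VIF. */
	return 0;
}

static struct netfront_accel_hooks example_accel_hooks = {
	.new_device	= example_accel_new_device,
	.remove		= example_accel_remove,
	/* .netdev_poll, .start_xmit, .start_napi_irq, .stop_napi_irq,
	 * .check_ready and .get_stats must be filled in the same way. */
};

static int __init example_accel_init(void)
{
	/* Returns 0 on success, <0 on error, >0 on an API version mismatch. */
	return netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
					   "example_accel",
					   &example_accel_hooks);
}
module_init(example_accel_init);

static void __exit example_accel_exit(void)
{
	/* The frontend string must match the one passed at load time. */
	netfront_accelerator_stop("example_accel");
}
module_exit(example_accel_exit);

MODULE_LICENSE("GPL");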
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/linux/000077500000000000000000000000001314037446600245455ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/linux/io.h000066400000000000000000000002771314037446600253330ustar00rootroot00000000000000#ifndef _LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/linux/mutex.h000066400000000000000000000015411314037446600260610ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This file is released under the GPLv2. */ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/linux/scatterlist.h000066400000000000000000000003761314037446600272650ustar00rootroot00000000000000#ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) #error "This version of Linux should not need compat linux/scatterlist.h" #endif #include #endif /* _LINUX_SCATTERLIST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/xen/000077500000000000000000000000001314037446600242005ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/compat-include/xen/platform-compat.h000066400000000000000000000126561314037446600274700ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
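 * (Defining gfp_t to plain 'unsigned' should be harmless on kernels that already have the typedef, since that is its underlying type.)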
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
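 * (In a #if the whole expression is processed before it is evaluated, so a reference to the undefined SLE_VERSION(10,1,0) would break the build even when the left-hand operand already decides the result; hence the duplicated branches below.)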
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) #if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) #define setup_xen_features_uvp xen_setup_features #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/config.mk000066400000000000000000000030711314037446600223010ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. 
we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H OS_UBUNTU_1004 := $(shell echo $(KERNDIR) | grep "2.6.32.21\|2.6.32.38\|2.6.32-24\|2.6.32-28\|2.6.32-33") OS_UBUNTU_9 := $(shell echo $(KERNDIR) | grep 2.6.31-14) # for suse 10 sp2(2.6.29-bigsmp) OS_DIST_2629 := $(shell echo $(KERNDIR) | grep '2.6.29-bigsmp') # for debian 5 OS_DIST_DEBIAN := $(shell echo $(KERNDIR) | grep '2.6.26-2') # for debian 6 32 OS_DIST_DEBIAN_6_32 := $(shell echo $(KERNDIR) | grep '2.6.32-5-686') #AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) ifeq ("Ubuntu", "$(OSDISTRIBUTION)") _XEN_CPPFLAGS += -DUBUNTU endif ifneq ($(OS_UBUNTU_1004), ) _XEN_CPPFLAGS += -DUBUNTU_1004 endif ifneq ($(OS_UBUNTU_9), ) _XEN_CPPFLAGS += -DUBUNTU_9 endif ifneq ($(OS_DIST_2629), ) _XEN_CPPFLAGS += -DSUSE_2629 endif ifneq ($(OS_DIST_DEBIAN), ) _XEN_CPPFLAGS += -DDEBIAN5 endif ifneq ($(OS_DIST_DEBIAN_6_32), ) _XEN_CPPFLAGS += -DDEBIAN6_32 endif ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif # _XEN_CPPFLAGS += -include $(AUTOCONF) EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/extra/000077500000000000000000000000001314037446600216255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/extra/xen_pvdrivers000066400000000000000000000005061314037446600244470ustar00rootroot00000000000000# Install the paravirtualized drivers install libata /sbin/modprobe xen-vbd 2>&1 |:; /sbin/modprobe --ignore-install libata install 8139cp /sbin/modprobe xen-vnif >/dev/null 2>&1 || /sbin/modprobe --ignore-install 8139cp install 8139too /sbin/modprobe xen-vnif >/dev/null 2>&1 || /sbin/modprobe --ignore-install 8139too UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/000077500000000000000000000000001314037446600221255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-i386/000077500000000000000000000000001314037446600233745ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-i386/README000066400000000000000000000000001314037446600242420ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/000077500000000000000000000000001314037446600234755ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/gnttab_dma.h000066400000000000000000000026471314037446600257570ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/hypercall.h000066400000000000000000000251111314037446600256310ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #if CONFIG_XEN_COMPAT <= 0x030002 # include /* memcpy() */ #endif #ifdef CONFIG_XEN_no #define HYPERCALL_ASM_OPERAND "%c" #define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) #define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #else #define HYPERCALL_ASM_OPERAND "*%" #define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) #define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #endif #define HYPERCALL_ARG(arg, n) \ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "1" \ : "=a" (__res) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, arg) \ ({ \ type __res; \ HYPERCALL_ARG(arg, 1); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "2" \ : "=a" (__res), "+r" (__arg1) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "3" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "4" \ : "=a" (__res), "+r" (__arg1), \ "+r" (__arg2), "+r" (__arg3) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "5" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall(type, op, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call *%6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : "g" (HYPERCALL_LOCATION(op)) \ : "memory" ); \ __res; \ }) #ifdef CONFIG_X86_32 # include "hypercall_32.h" #else # include "hypercall_64.h" #endif static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmu_update(req, count, success_count, domid); return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned 
int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmuext_op(op, count, success_count, domid); return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } #endif static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { bool fixup = false; int rc; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); #ifdef GNTTABOP_map_grant_ref if (cmd == GNTTABOP_map_grant_ref) #endif fixup = gnttab_pre_map_adjust(cmd, uop, count); rc = _hypercall3(int, grant_table_op, cmd, uop, count); if (rc == 0 && fixup) rc = gnttab_post_map_adjust(uop, count); return rc; } static 
inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN_no static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/hypercall_32.h000066400000000000000000000031401314037446600261330ustar00rootroot00000000000000#define HYPERCALL_arg1 "ebx" #define HYPERCALL_arg2 "ecx" #define HYPERCALL_arg3 "edx" #define HYPERCALL_arg4 "esi" #define HYPERCALL_arg5 "edi" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall2(long, set_timer_op, (unsigned long)timeout, (unsigned long)(timeout>>32)); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, (unsigned long)ma, (unsigned long)(ma>>32), (unsigned long)desc, (unsigned long)(desc>>32)); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/hypercall_64.h000066400000000000000000000025661314037446600261530ustar00rootroot00000000000000#define HYPERCALL_arg1 "rdi" #define HYPERCALL_arg2 "rsi" #define HYPERCALL_arg3 "rdx" #define HYPERCALL_arg4 "r10" #define HYPERCALL_arg5 "r8" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned 
long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/hypervisor.h000066400000000000000000000235711314037446600260700ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(pgd_t *); void xen_new_user_pt(pgd_t *); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(pgd_t *); void xen_pgd_unpin(pgd_t *); void xen_init_pgd_pin(void); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES_uvp void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #ifdef CONFIG_XEN_no DECLARE_PER_CPU(bool, xen_lazy_mmu); int xen_multicall_flush(bool); int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, unsigned long flags); int __must_check xen_multi_mmu_update(mmu_update_t *, unsigned int count, unsigned int *success_count, domid_t); int __must_check xen_multi_mmuext_op(struct mmuext_op *, unsigned int count, unsigned int *success_count, domid_t); #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = true; } static inline void arch_leave_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = false; xen_multicall_flush(false); } #if defined(CONFIG_X86_32) #define arch_use_lazy_mmu_mode() unlikely(x86_read_percpu(xen_lazy_mmu)) #elif !defined(arch_use_lazy_mmu_mode) #define arch_use_lazy_mmu_mode() unlikely(__get_cpu_var(xen_lazy_mmu)) #endif #if 0 /* All uses are in places potentially called asynchronously, but * asynchronous code should rather not make use of lazy mode at all. 
* Therefore, all uses of this function get commented out, proper * detection of asynchronous invocations is added whereever needed, * and this function is disabled to catch any new (improper) uses. */ static inline void arch_flush_lazy_mmu_mode(void) { if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); } #endif struct gnttab_map_grant_ref; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *, unsigned int count); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *, unsigned int); #else static inline int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *m, unsigned int count) { BUG(); return -ENOSYS; } #endif #else /* CONFIG_XEN_no */ static inline void xen_multicall_flush(bool ignore) {} #define arch_use_lazy_mmu_mode() false #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmuext_op(...) ({ BUG(); -ENOSYS; }) #define gnttab_pre_map_adjust(...) false #define gnttab_post_map_adjust(...) ({ BUG(); -ENOSYS; }) #endif /* CONFIG_XEN_no */ #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN_no #define is_running_on_xen() 1 extern char hypercall_page[PAGE_SIZE]; #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif #include static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void __noreturn HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. 
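 * (BUG() would itself trigger another crash shutdown, so when the reason is already SHUTDOWN_crash we skip it and just spin.)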
*/ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int __must_check HYPERVISOR_poll_no_timeout( evtchn_port_t *ports, unsigned int nr_ports) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN_no static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)req; mcl->args[1] = count; mcl->args[2] = (unsigned long)success_count; mcl->args[3] = domid; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #ifdef LINUX /* drivers/staging/rt28?0/ use Windows-style types, including VOID */ #undef VOID #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/maddr.h000066400000000000000000000001201314037446600247260ustar00rootroot00000000000000#ifdef CONFIG_X86_32 # include "maddr_32.h" #else # include "maddr_64.h" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/maddr_32.h000066400000000000000000000126271314037446600252510ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN_no extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
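 *
 * Illustrative usage sketch (assumes a caller-owned page, here called
 * "buf"; the variable names are not part of this header): a PV driver
 * that must hand a machine frame or machine address to the hypervisor
 * converts with these helpers, e.g.
 *
 *      void *buf = (void *)__get_free_page(GFP_KERNEL);
 *      unsigned long pfn = __pa(buf) >> PAGE_SHIFT;
 *      unsigned long mfn = pfn_to_mfn(pfn);        machine frame number
 *      maddr_t maddr = virt_to_machine(buf);       full machine address
 *
 * and converts hypervisor-supplied frames back with mfn_to_pfn() or
 * machine_to_phys(). On XENFEAT_auto_translated_physmap guests these
 * helpers are identity operations, so callers need not special-case
 * that configuration.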
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else #define pte_phys_to_machine phys_to_machine #define pte_machine_to_phys machine_to_phys #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/maddr_64.h000066400000000000000000000115571314037446600252570ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN_no extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/asm-xen/synch_bitops.h000066400000000000000000000062311314037446600263540ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
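 *
 * Illustrative sketch (assumes an event-channel "port" supplied by the
 * caller; handle_event() is a caller-defined placeholder): bits shared
 * with the hypervisor, such as the event-channel bitmaps in the
 * shared_info page, must be updated with these helpers rather than the
 * plain bitops, because Xen and other vCPUs may touch the same words
 * concurrently:
 *
 *      shared_info_t *s = HYPERVISOR_shared_info;
 *      synch_set_bit(port, s->evtchn_mask);             mask the port
 *      if (synch_test_and_clear_bit(port, s->evtchn_pending))
 *              handle_event(port);                      caller-defined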
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/000077500000000000000000000000001314037446600231025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/balloon.h000066400000000000000000000052711314037446600247060ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_H__ #define __XEN_BALLOON_H__ #include #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance_uvp_uvp(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec_uvp(int nr_pages); void free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages); /* Free an empty page range (not allocated through alloc_empty_pages_and_pagevec_uvp), adding to the balloon. */ void free_empty_pages(struct page **pagevec, int nr_pages); void balloon_release_driver_page_uvp_uvp(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif #endif /* __XEN_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/blkif.h000066400000000000000000000112031314037446600243370ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? 
*/ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/compat_ioctl.h000066400000000000000000000030411314037446600257260ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 
2007 * * Authors: Jimi Xenidis * Hollis Blanchard */ #ifndef __LINUX_XEN_COMPAT_H__ #define __LINUX_XEN_COMPAT_H__ #include extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg); struct privcmd_mmap_32 { int num; domid_t dom; compat_uptr_t entry; }; struct privcmd_mmapbatch_32 { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ compat_uptr_t arr; /* array of mfns - top nibble set on err */ }; #define IOCTL_PRIVCMD_MMAP_32 \ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32)) #define IOCTL_PRIVCMD_MMAPBATCH_32 \ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32)) #endif /* __LINUX_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/cpu_hotplug.h000066400000000000000000000014751314037446600256130ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/driver_util.h000066400000000000000000000003161314037446600256030ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/events.h000066400000000000000000000032311314037446600245560ustar00rootroot00000000000000#ifndef _XEN_EVENTS_H #define _XEN_EVENTS_H #include #include #include #include int bind_evtchn_to_irq(unsigned int evtchn); int bind_evtchn_to_irqhandler(unsigned int evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (even for bindings * made with bind_evtchn_to_irqhandler()). */ void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); int resend_irq_on_evtchn(unsigned int irq); void rebind_evtchn_irq(int evtchn, int irq); static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } extern void notify_remote_via_irq_uvp(int irq); extern void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. 
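 *
 * Illustrative sketch (condition_holds() stands in for whatever the
 * caller is actually waiting on): the usual pattern is to clear the
 * pending state, re-check the condition, and only then block in the
 * poll:
 *
 *      xen_clear_irq_pending(irq);
 *      while (!condition_holds()) {
 *              xen_poll_irq(irq);               returns once irq is pending
 *              xen_clear_irq_pending(irq);
 *      }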
*/ void xen_poll_irq(int irq); #endif /* _XEN_EVENTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/evtchn.h000066400000000000000000000161601314037446600245460ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. 
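 *
 * Illustrative sketch (otherend_id, my_handler and dev are assumed to be
 * supplied by the calling driver): a driver that offers a fresh event
 * channel to a peer domain typically does
 *
 *      int irq = bind_listening_port_to_irqhandler(otherend_id, my_handler,
 *                                                  0, "mydriver", dev);
 *      if (irq < 0)
 *              return irq;                  bind failed, nothing to undo
 *      ...
 *      unbind_from_irqhandler_uvp(irq, dev);    also closes the event channel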
*/ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN_no) && defined(CONFIG_X86) int bind_virq_to_irqaction( unsigned int virq, unsigned int cpu, struct irqaction *action); #else #define bind_virq_to_irqaction(virq, cpu, action) \ bind_virq_to_irqhandler(virq, cpu, (action)->handler, \ (action)->flags | IRQF_NOBALANCING, \ (action)->name, action) #endif #if defined(CONFIG_SMP) && !defined(MODULE) #ifndef CONFIG_X86 int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #else int bind_ipi_to_irqaction( unsigned int ipi, unsigned int cpu, struct irqaction *action); #endif #endif /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN_no) && defined(CONFIG_X86) /* Specialized unbind function for per-CPU IRQs. */ void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, struct irqaction *); #else #define unbind_from_per_cpu_irq(irq, cpu, action) \ unbind_from_irqhandler_uvp(irq, action) #endif #ifndef CONFIG_XEN_no void irq_resume(void); #endif /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void set_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_set_bit(port, s->evtchn_pending); } static inline int test_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } static inline void multi_notify_remote_via_evtchn(multicall_entry_t *mcl, int port) { struct evtchn_send *send = (void *)(mcl->args + 2); BUILD_BUG_ON(sizeof(*send) > sizeof(mcl->args) - 2 * sizeof(*mcl->args)); send->port = port; mcl->op = __HYPERVISOR_event_channel_op; mcl->args[0] = EVTCHNOP_send; mcl->args[1] = (unsigned long)send; } /* Clear an irq's pending state, in preparation for polling on it. */ void xen_clear_irq_pending(int irq); /* Set an irq's pending state, to avoid blocking on it. */ void xen_set_irq_pending(int irq); /* Test an irq's pending state. */ int xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq_uvp(int irq); int multi_notify_remote_via_irq_uvp(multicall_entry_t *, int irq); int irq_to_evtchn_port(int irq); #define PIRQ_SET_MAPPING 0x0 #define PIRQ_CLEAR_MAPPING 0x1 #define PIRQ_GET_MAPPING 0x3 int pirq_mapstatus(int pirq, int action); int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action)); int clear_pirq_hw_action(int pirq); #define PIRQ_STARTUP 1 #define PIRQ_SHUTDOWN 2 #define PIRQ_ENABLE 3 #define PIRQ_DISABLE 4 #define PIRQ_END 5 #define PIRQ_ACK 6 #if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); #endif #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/features.h000066400000000000000000000007701314037446600250750ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. 
* * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_FEATURES_H__ #define __XEN_FEATURES_H__ #include #include void xen_setup_features(void); extern u8 xen_features_uvp[XENFEAT_NR_SUBMAPS * 32]; static inline int xen_feature(int flag) { return xen_features_uvp[flag]; } #endif /* __XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/firmware.h000066400000000000000000000003011314037446600250610ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) void copy_edd(void); #endif void copy_edid(void); #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/gnttab.h000066400000000000000000000125771314037446600245460ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
*/ void gnttab_end_foreign_access_uvp(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference_uvp(grant_ref_t ref); void gnttab_free_grant_reference_uvps(grant_ref_t head); int gnttab_empty_grant_references_uvp(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference_uvp(grant_ref_t *pprivate_head); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page_uvp(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page_uvp(struct page *page); #ifndef CONFIG_XEN_no int gnttab_resume(void); #endif void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/grant_table.h000066400000000000000000000105641314037446600255430ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. 
* (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_suspend(void); int gnttab_resume(void); int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
*/ void gnttab_end_foreign_access_uvp(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference_uvp(grant_ref_t ref); void gnttab_free_grant_reference_uvps(grant_ref_t head); int gnttab_empty_grant_references_uvp(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference_uvp(grant_ref_t *pprivate_head); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t, domid_t domid, unsigned long pfn); int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared); void arch_gnttab_unmap_shared(struct grant_entry *shared, unsigned long nr_gframes); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/hvc-console.h000066400000000000000000000007051314037446600254750ustar00rootroot00000000000000#ifndef XEN_HVC_CONSOLE_H #define XEN_HVC_CONSOLE_H extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN_uvp void xen_console_resume(void); void xen_raw_console_write(const char *str); void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } static inline void xen_raw_printk(const char *fmt, ...) { } #endif #endif /* XEN_HVC_CONSOLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/hvm.h000066400000000000000000000007111314037446600240440ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/hypercall.h000066400000000000000000000014011314037446600252320ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? 
*rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/hypervisor_sysfs.h000066400000000000000000000015051314037446600267150ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/000077500000000000000000000000001314037446600250425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/COPYING000066400000000000000000000032641314037446600261020ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/000077500000000000000000000000001314037446600264025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/cpuid.h000066400000000000000000000050721314037446600276630ustar00rootroot00000000000000/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/hvm/000077500000000000000000000000001314037446600271745ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/hvm/save.h000066400000000000000000000250301314037446600303030ustar00rootroot00000000000000/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. */ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t gtsc_khz; /* Guest's TSC frequency in kHz */ }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint64_t sysenter_cs; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; uint64_t msr_tsc_aux; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. 
*/ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #ifdef __ia64__ #define VIOAPIC_IS_IOSAPIC 1 #define VIOAPIC_NUM_PINS 24 #else #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ #endif struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; #if !VIOAPIC_IS_IOSAPIC uint8_t reserved[4]; uint8_t dest_id; #else uint8_t reserved[3]; uint16_t dest_id; #endif } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { uint8_t data[1024]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). */ union { unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. 
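 *
 * Worked example of the mapping above: INTA# (INTx# = 0) of device 2 is
 * wired to link (2 + 0) & 3 = 2, while INTB# (INTx# = 1) of the same
 * device goes to link (2 + 1) & 3 = 3; route[link] then gives the GSI.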
*/ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * Viridian hypervisor context. */ struct hvm_viridian_context { uint64_t hypercall_gpa; uint64_t guest_os_id; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 15 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/xen-mca.h000066400000000000000000000342111314037446600301040ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. 
* Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage Senario: After offlining broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easy broken page earlier when doing next reboot. 
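 *
 * Illustrative consumption sketch (an assumption about how a management
 * stack might react, not something this interface mandates; "mi" is a
 * struct mc_info buffer filled by an earlier fetch and remember_bad_mfn()
 * is a hypothetical persistent store):
 *
 *     struct mcinfo_common *mic;
 *     struct mcinfo_recovery *rec;
 *     x86_mcinfo_lookup(mic, mi, MC_TYPE_RECOVERY);
 *     rec = (struct mcinfo_recovery *)mic;
 *     if ( rec != NULL &&
 *          (rec->action_types & MC_ACTION_PAGE_OFFLINE) &&
 *          (rec->action_flags & REC_ACTION_RECOVERED) )
 *         remember_bad_mfn(rec->action_info.page_retire.mfn);
 *
 * x86_mcinfo_lookup() and the recovery structures are defined later in
 * this file.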
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t _pad0; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. 
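 *
 * Minimal invocation sketch (illustrative only; HYPERVISOR_mca() names a
 * guest-specific wrapper around __HYPERVISOR_mca and "buf" is a
 * caller-provided struct mc_info buffer - both are assumptions, not
 * definitions made by this header):
 *
 *     struct xen_mc mc;
 *     memset(&mc, 0, sizeof(mc));
 *     mc.cmd = XEN_MC_fetch;
 *     mc.interface_version = XEN_MCA_INTERFACE_VERSION;
 *     mc.u.mc_fetch.flags = XEN_MC_NONURGENT;
 *     set_xen_guest_handle(mc.u.mc_fetch.data, buf);
 *     long rc = HYPERVISOR_mca(&mc);
 *
 * On success the telemetry lands in buf and mc.u.mc_fetch.fetch_id holds
 * the id to hand back in a later fetch carrying XEN_MC_ACK.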
*/ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/xen-x86_32.h000066400000000000000000000145401314037446600303000ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
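 *
 * Clarifying note: the 64-bit handle defined below keeps domctl/sysctl
 * structure layouts identical for 32- and 64-bit callers; the redefined
 * set_xen_guest_handle() zeroes all 8 bytes of the wide variant before
 * storing a 32-bit pointer, so the upper half never carries garbage that
 * a 64-bit hypervisor would then interpret.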
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/xen-x86_64.h000066400000000000000000000157351314037446600303140ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. 
* All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86/xen.h000066400000000000000000000171711314037446600273540ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #include "../xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif /* Allow co-existing Linux 2.6.23+ Xen interface definitions. */ #define DEFINE_GUEST_HANDLE_STRUCT(name) struct name #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in legacy multi-processor guests. */ #define XEN_LEGACY_MAX_VCPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
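 * The FXSAVE/FXRSTOR memory image is 512 bytes and needs 16-byte
 * alignment, hence the opaque 512-byte blob below at the very start of
 * the structure.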
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86_32.h000066400000000000000000000024131314037446600270570ustar00rootroot00000000000000/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/arch-x86_64.h000066400000000000000000000024131314037446600270640ustar00rootroot00000000000000/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/callback.h000066400000000000000000000074531314037446600267600ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/dom0_ops.h000066400000000000000000000077061314037446600267450ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/domctl.h000066400000000000000000001004501314037446600264750ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "grant_table.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000006 struct xenctl_cpumap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_cpus; }; /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ /* XEN_DOMCTL_createdomain */ struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) /* Use hardware-assisted paging if available? */ #define _XEN_DOMCTL_CDF_hap 1 #define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) /* Should domain memory integrity be verifed by tboot during Sx? */ #define _XEN_DOMCTL_CDF_s3_integrity 2 #define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) uint32_t flags; /* Disable out-of-sync shadow page tables? 
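 * (With out-of-sync support enabled, the shadow pagetable code may let
 * guest page tables drift temporarily and resynchronise them on demand;
 * setting this flag disables that optimisation for the new domain.)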
*/ #define _XEN_DOMCTL_CDF_oos_off 3 #define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off) }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); /* XEN_DOMCTL_getdomaininfo */ struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t shr_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); /* XEN_DOMCTL_getmemlist */ struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); /* XEN_DOMCTL_getpageframeinfo */ #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28) #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? */ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); /* XEN_DOMCTL_getpageframeinfo2 */ struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* * Control shadow pagetables operation */ /* XEN_DOMCTL_shadow_op */ /* Disable shadow mode. 
*/ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); /* XEN_DOMCTL_max_mem */ struct xen_domctl_max_mem { /* IN variables. */ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); /* XEN_DOMCTL_setvcpucontext */ /* XEN_DOMCTL_getvcpucontext */ struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); /* XEN_DOMCTL_getvcpuinfo */ struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? */ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set which physical cpus a vcpu can execute on. 
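 * The cpumap.bitmap handle should reference at least (nr_cpus + 7) / 8
 * bytes; by the usual xenctl_cpumap convention (an assumption, it is not
 * spelled out in this header) bit (cpu % 8) of byte (cpu / 8) stands for
 * physical CPU "cpu".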
*/ /* XEN_DOMCTL_setvcpuaffinity */ /* XEN_DOMCTL_getvcpuaffinity */ struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_cpumap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); /* XEN_DOMCTL_max_vcpus */ struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); /* XEN_DOMCTL_scheduler_op */ /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 /* Set or get info? */ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); /* XEN_DOMCTL_setdomainhandle */ struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); /* XEN_DOMCTL_setdebugging */ struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); /* XEN_DOMCTL_irq_permission */ struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); /* XEN_DOMCTL_iomem_permission */ struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); /* XEN_DOMCTL_ioport_permission */ struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? */ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); /* XEN_DOMCTL_hypercall_init */ struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); /* XEN_DOMCTL_arch_setup */ #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) #define _XEN_DOMAINSETUP_sioemu_guest 2 #define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. 
*/ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. */ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); /* XEN_DOMCTL_settimeoffset */ struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); /* XEN_DOMCTL_gethvmcontext */ /* XEN_DOMCTL_sethvmcontext */ typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); /* XEN_DOMCTL_set_address_size */ /* XEN_DOMCTL_get_address_size */ typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); /* XEN_DOMCTL_real_mode_area */ struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); /* XEN_DOMCTL_sendtrigger */ #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 #define XEN_DOMCTL_SENDTRIGGER_POWER 3 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. */ /* XEN_DOMCTL_assign_device */ /* XEN_DOMCTL_test_assign_device */ /* XEN_DOMCTL_deassign_device */ struct xen_domctl_assign_device { uint32_t machine_bdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Retrieve sibling devices infomation of machine_bdf */ /* XEN_DOMCTL_get_device_group */ struct xen_domctl_get_device_group { uint32_t machine_bdf; /* IN */ uint32_t max_sdevs; /* IN */ uint32_t num_sdevs; /* OUT */ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ }; typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); /* Pass-through interrupts: bind real irq -> hvm devfn. */ /* XEN_DOMCTL_bind_pt_irq */ /* XEN_DOMCTL_unbind_pt_irq */ typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA, PT_IRQ_TYPE_MSI, PT_IRQ_TYPE_MSI_TRANSLATE, } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; struct { uint8_t gvec; uint32_t gflags; uint64_aligned_t gtable; } msi; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. 
*/ /* XEN_DOMCTL_memory_mapping */ #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. */ /* XEN_DOMCTL_ioport_mapping */ struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ /* XEN_DOMCTL_pin_mem_cacheattr */ /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); /* XEN_DOMCTL_set_ext_vcpucontext */ /* XEN_DOMCTL_get_ext_vcpucontext */ struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ /* XEN_DOMCTL_set_opt_feature */ struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! 
*/ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); /* * Set the target domain for a domain */ /* XEN_DOMCTL_set_target */ struct xen_domctl_set_target { domid_t target; }; typedef struct xen_domctl_set_target xen_domctl_set_target_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); #if defined(__i386__) || defined(__x86_64__) # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF /* XEN_DOMCTL_set_cpuid */ struct xen_domctl_cpuid { uint32_t input[2]; uint32_t eax; uint32_t ebx; uint32_t ecx; uint32_t edx; }; typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); #endif /* XEN_DOMCTL_subscribe */ struct xen_domctl_subscribe { uint32_t port; /* IN */ }; typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); /* * Define the maximum machine address size which should be allocated * to a guest. */ /* XEN_DOMCTL_set_machine_address_size */ /* XEN_DOMCTL_get_machine_address_size */ /* * Do not inject spurious page faults into this domain. */ /* XEN_DOMCTL_suppress_spurious_page_faults */ /* XEN_DOMCTL_debug_op */ #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 struct xen_domctl_debug_op { uint32_t op; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); /* * Request a particular record from the HVM context */ /* XEN_DOMCTL_gethvmcontext_partial */ typedef struct xen_domctl_hvmcontext_partial { uint32_t type; /* IN: Type of record required */ uint32_t instance; /* IN: Instance of that type */ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ } xen_domctl_hvmcontext_partial_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); /* XEN_DOMCTL_disable_migrate */ typedef struct xen_domctl_disable_migrate { uint32_t disable; /* IN: 1: disable migration and restore */ } xen_domctl_disable_migrate_t; /* XEN_DOMCTL_gettscinfo */ /* XEN_DOMCTL_settscinfo */ struct xen_guest_tsc_info { uint32_t tsc_mode; uint32_t gtsc_khz; uint32_t incarnation; uint32_t pad; uint64_aligned_t elapsed_nsec; }; typedef struct xen_guest_tsc_info xen_guest_tsc_info_t; DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t); typedef struct xen_domctl_tsc_info { XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */ xen_guest_tsc_info_t info; /* IN */ } xen_domctl_tsc_info_t; /* XEN_DOMCTL_gdbsx_guestmemio guest mem io */ struct xen_domctl_gdbsx_memio { /* IN */ uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */ uint64_aligned_t gva; /* guest virtual address */ uint64_aligned_t uva; /* user buffer virtual address */ uint32_t len; /* number of bytes to read/write */ uint8_t gwr; /* 0 = read from guest. 1 = write to guest */ /* OUT */ uint32_t remain; /* bytes remaining to be copied */ }; /* XEN_DOMCTL_gdbsx_pausevcpu */ /* XEN_DOMCTL_gdbsx_unpausevcpu */ struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */ uint32_t vcpu; /* which vcpu */ }; /* XEN_DOMCTL_gdbsx_domstatus */ struct xen_domctl_gdbsx_domstatus { /* OUT */ uint8_t paused; /* is the domain paused */ uint32_t vcpu_id; /* any vcpu in an event? */ uint32_t vcpu_ev; /* if yes, what event? */ }; /* * Memory event operations */ /* XEN_DOMCTL_mem_event_op */ /* Add and remove memory handlers */ #define XEN_DOMCTL_MEM_EVENT_OP_ENABLE 0 #define XEN_DOMCTL_MEM_EVENT_OP_DISABLE 1 /* * Page memory in and out. 
*/ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING (1 << 0) /* Domain memory paging */ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE 0 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT 1 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP 2 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME 3 struct xen_domctl_mem_event_op { uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_ENABLE_* */ /* OP_ENABLE */ uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */ uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */ /* Other OPs */ uint64_aligned_t gfn; /* IN: gfn of page being operated on */ }; typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t); /* * Memory sharing operations */ /* XEN_DOMCTL_mem_sharing_op */ #define XEN_DOMCTL_MEM_SHARING_OP_CONTROL 0 #define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GFN 1 #define XEN_DOMCTL_MEM_SHARING_OP_NOMINATE_GREF 2 #define XEN_DOMCTL_MEM_SHARING_OP_SHARE 3 #define XEN_DOMCTL_MEM_SHARING_OP_RESUME 4 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GFN 5 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_MFN 6 #define XEN_DOMCTL_MEM_SHARING_OP_DEBUG_GREF 7 #define XEN_DOMCTL_MEM_SHARING_S_HANDLE_INVALID (-10) #define XEN_DOMCTL_MEM_SHARING_C_HANDLE_INVALID (-9) struct xen_domctl_mem_sharing_op { uint8_t op; /* XEN_DOMCTL_MEM_EVENT_OP_* */ union { uint8_t enable; /* OP_CONTROL */ struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to nominate */ uint32_t grant_ref; /* IN: grant ref to nominate */ } u; uint64_aligned_t handle; /* OUT: the handle */ } nominate; struct mem_sharing_op_share { /* OP_SHARE */ uint64_aligned_t source_handle; /* IN: handle to the source page */ uint64_aligned_t client_handle; /* IN: handle to the client page */ } share; struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to debug */ uint64_aligned_t mfn; /* IN: mfn to debug */ grant_ref_t gref; /* IN: gref to debug */ } u; } debug; } u; }; typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t); struct xen_domctl { uint32_t cmd; #define XEN_DOMCTL_createdomain 1 #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_getdomaininfo 5 #define XEN_DOMCTL_getmemlist 6 #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_getpageframeinfo2 8 #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_shadow_op 10 #define XEN_DOMCTL_max_mem 11 #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 #define XEN_DOMCTL_getvcpuinfo 14 #define XEN_DOMCTL_max_vcpus 15 #define XEN_DOMCTL_scheduler_op 16 #define XEN_DOMCTL_setdomainhandle 17 #define XEN_DOMCTL_setdebugging 18 #define XEN_DOMCTL_irq_permission 19 #define XEN_DOMCTL_iomem_permission 20 #define XEN_DOMCTL_ioport_permission 21 #define XEN_DOMCTL_hypercall_init 22 #define XEN_DOMCTL_arch_setup 23 #define XEN_DOMCTL_settimeoffset 24 #define XEN_DOMCTL_getvcpuaffinity 25 #define XEN_DOMCTL_real_mode_area 26 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_subscribe 29 #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_bind_pt_irq 38 #define XEN_DOMCTL_memory_mapping 39 #define XEN_DOMCTL_ioport_mapping 40 #define XEN_DOMCTL_pin_mem_cacheattr 
41 #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 #define XEN_DOMCTL_set_opt_feature 44 #define XEN_DOMCTL_test_assign_device 45 #define XEN_DOMCTL_set_target 46 #define XEN_DOMCTL_deassign_device 47 #define XEN_DOMCTL_unbind_pt_irq 48 #define XEN_DOMCTL_set_cpuid 49 #define XEN_DOMCTL_get_device_group 50 #define XEN_DOMCTL_set_machine_address_size 51 #define XEN_DOMCTL_get_machine_address_size 52 #define XEN_DOMCTL_suppress_spurious_page_faults 53 #define XEN_DOMCTL_debug_op 54 #define XEN_DOMCTL_gethvmcontext_partial 55 #define XEN_DOMCTL_mem_event_op 56 #define XEN_DOMCTL_mem_sharing_op 57 #define XEN_DOMCTL_disable_migrate 58 #define XEN_DOMCTL_gettscinfo 59 #define XEN_DOMCTL_settscinfo 60 #define XEN_DOMCTL_gdbsx_guestmemio 1000 #define XEN_DOMCTL_gdbsx_pausevcpu 1001 #define XEN_DOMCTL_gdbsx_unpausevcpu 1002 #define XEN_DOMCTL_gdbsx_domstatus 1003 uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_disable_migrate disable_migrate; struct xen_domctl_tsc_info tsc_info; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_get_device_group get_device_group; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; struct xen_domctl_set_target set_target; struct xen_domctl_subscribe subscribe; struct xen_domctl_debug_op debug_op; struct xen_domctl_mem_event_op mem_event_op; struct xen_domctl_mem_sharing_op mem_sharing_op; #if defined(__i386__) || defined(__x86_64__) struct xen_domctl_cpuid cpuid; #endif struct xen_domctl_gdbsx_memio gdbsx_guest_memio; struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu; struct xen_domctl_gdbsx_domstatus gdbsx_domstatus; uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ 
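The domctl interface defined above is driven by filling in a struct xen_domctl and handing it to the hypervisor. The short sketch below (an editorial illustration, not part of the original header) shows how a privileged toolstack component might issue XEN_DOMCTL_getdomaininfo and decode the XEN_DOMINF_* flags; do_domctl() is a hypothetical stand-in for the real submission path (in practice something like libxc's xc_domctl() over the privcmd device), and the code assumes this header and its prerequisites are already included. The interface_version field is set explicitly because the hypervisor is expected to reject requests whose version does not match XEN_DOMCTL_INTERFACE_VERSION.

/*
 * Illustrative sketch only: query basic information about a domain using
 * the structures and constants defined in this header.  do_domctl() is a
 * hypothetical helper for delivering the request to Xen.
 */
#include <stdio.h>
#include <string.h>

extern int do_domctl(struct xen_domctl *domctl);   /* hypothetical submission helper */

static int print_domain_info(domid_t domid)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd               = XEN_DOMCTL_getdomaininfo;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain            = domid;

    if (do_domctl(&domctl) != 0)
        return -1;                                  /* hypercall failed */

    {
        struct xen_domctl_getdomaininfo *info = &domctl.u.getdomaininfo;
        /* Guest-supplied shutdown code lives in bits 16..23 of flags. */
        uint32_t shutdown_code =
            (info->flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;

        printf("domain %u: %u online vcpus, %llu pages\n",
               (unsigned)info->domain, info->nr_online_vcpus,
               (unsigned long long)info->tot_pages);
        if (info->flags & XEN_DOMINF_running)
            printf("  currently running\n");
        if (info->flags & XEN_DOMINF_paused)
            printf("  paused by control software\n");
        if (info->flags & XEN_DOMINF_shutdown)
            printf("  shut down (guest code %u)\n", shutdown_code);
    }
    return 0;
}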
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/elfnote.h000066400000000000000000000165011314037446600266520ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. 
It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The (non-default) location the initial phys-to-machine map should be * placed at by the hypervisor (Dom0) or the tools (DomU). * The kernel must be prepared for this mapping to be established using * large pages, despite such otherwise not being available to guests. * The kernel must also be able to handle the page table pages used for * this mapping not being accessible through the initial mapping. * (Only x86-64 supports this at present.) */ #define XEN_ELFNOTE_INIT_P2M 15 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_INIT_P2M /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. 
* xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/event_channel.h000066400000000000000000000212301314037446600300220ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. 
Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). 
*/ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/features.h000066400000000000000000000053701314037446600270360ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. 
*/ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/grant_table.h000066400000000000000000000534771314037446600275150ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include "xen.h" /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. 
Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ /* * Version 1 of the grant table entry structure is maintained purely * for backwards compatibility. New guests should use version 2. */ #if __XEN_INTERFACE_VERSION__ < 0x0003020a #define grant_entry_v1 grant_entry #define grant_entry_v1_t grant_entry_t #endif struct grant_entry_v1 { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry_v1 grant_entry_v1_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. * GTF_transitive: Allow @domid to transitively access a subrange of * @trans_grant in @trans_domid. No mappings are allowed. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_transitive (3U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] * GTF_sub_page: Grant access to only a subrange of the page. @domid * will only be allowed to copy from the grant, and not * map it. 
[GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) #define _GTF_sub_page (8) #define GTF_sub_page (1U<<_GTF_sub_page) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /* * Version 2 grant table entries. These fulfil the same role as * version 1 entries, but can represent more complicated operations. * Any given domain will have either a version 1 or a version 2 table, * and every entry in the table will be the same version. * * The interface by which domains use grant references does not depend * on the grant table version in use by the other domain. */ #if __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * Version 1 and version 2 grant entries share a common prefix. The * fields of the prefix are documented as part of struct * grant_entry_v1. */ struct grant_entry_header { uint16_t flags; domid_t domid; }; typedef struct grant_entry_header grant_entry_header_t; /* * Version 2 of the grant entry structure. */ union grant_entry_v2 { grant_entry_header_t hdr; /* * This member is used for V1-style full page grants, where either: * * -- hdr.type is GTF_accept_transfer, or * -- hdr.type is GTF_permit_access and GTF_sub_page is not set. * * In that case, the frame field has the same semantics as the * field of the same name in the V1 entry structure. */ struct { grant_entry_header_t hdr; uint32_t pad0; uint64_t frame; } full_page; /* * If the grant type is GTF_grant_access and GTF_sub_page is set, * @domid is allowed to access bytes [@page_off,@page_off+@length) * in frame @frame. */ struct { grant_entry_header_t hdr; uint16_t page_off; uint16_t length; uint64_t frame; } sub_page; /* * If the grant is GTF_transitive, @domid is allowed to use the * grant @gref in domain @trans_domid, as if it was the local * domain. Obviously, the transitive access must be compatible * with the original grant. * * The current version of Xen does not allow transitive grants * to be mapped. */ struct { grant_entry_header_t hdr; domid_t trans_domid; uint16_t pad0; grant_ref_t gref; } transitive; uint32_t __spacer[4]; /* Pad to a power of two */ }; typedef union grant_entry_v2 grant_entry_v2_t; typedef uint16_t grant_status_t; #endif /* __XEN_INTERFACE_VERSION__ */ /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. 
If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref); typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref); typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table); typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table); typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. 
*/ int16_t status; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer); typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define _GNTCOPY_can_fail (2) #define GNTCOPY_can_fail (1<<_GNTCOPY_can_fail) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy); DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size); typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); #if __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * GNTTABOP_set_version: Request a particular version of the grant * table shared table structure. This operation can only be performed * once in any given domain. It must be performed before any grants * are activated; otherwise, the domain will be stuck with version 1. * The only defined versions are 1 and 2. */ #define GNTTABOP_set_version 8 struct gnttab_set_version { /* IN/OUT parameters */ uint32_t version; }; typedef struct gnttab_set_version gnttab_set_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t); /* * GNTTABOP_get_status_frames: Get the list of frames used to store grant * status for . 
In grant format version 2, the status is separated * from the other shared grant fields to allow more efficient synchronization * using barriers instead of atomic cmpexch operations. * specify the size of vector . * The frame addresses are returned in the . * Only addresses are returned, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_get_status_frames 9 struct gnttab_get_status_frames { /* IN parameters. */ uint32_t nr_frames; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(uint64_t) frame_list; }; typedef struct gnttab_get_status_frames gnttab_get_status_frames_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t); /* * GNTTABOP_get_version: Get the grant table version which is in * effect for domain . */ #define GNTTABOP_get_version 10 struct gnttab_get_version { /* IN parameters */ domid_t dom; uint16_t pad; /* OUT parameters */ uint32_t version; }; typedef struct gnttab_get_version gnttab_get_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t); #endif /* __XEN_INTERFACE_VERSION__ */ /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) #define _GNTMAP_can_fail (5) #define GNTMAP_can_fail (1<<_GNTMAP_can_fail) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTST_eagain (-12) /* Could not map at the moment. Retry. 
*/ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large", \ "could not map at the moment, retry" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/000077500000000000000000000000001314037446600256345ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/e820.h000066400000000000000000000030061314037446600264620ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/hvm_info_table.h000066400000000000000000000052611314037446600307650ustar00rootroot00000000000000/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
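/*
 * Illustrative sketch (not part of the Xen public headers): turning a
 * GNTST_* status code into its human-readable message using the
 * GNTTABOP_error_msgs table defined in grant_table.h earlier in this
 * archive.  All error codes are negative, so the table is indexed with
 * -status.
 */
static const char *example_gntst_to_string(int16_t status)
{
    static const char *const msgs[] = GNTTABOP_error_msgs;
    int idx = -status;

    if (status > 0 || idx >= (int)(sizeof(msgs) / sizeof(msgs[0])))
        return "unknown grant status";
    return msgs[idx];
}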
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) /* Maximum we can support with current vLAPIC ID mapping. */ #define HVM_MAX_VCPUS 128 struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; /* Should firmware build ACPI tables? */ uint8_t acpi_enabled; /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ uint8_t apic_mode; /* How many CPUs does this domain have? */ uint32_t nr_vcpus; /* * MEMORY MAP provided by HVM domain builder. * Notes: * 1. page_to_phys(x) = x << 12 * 2. If a field is zero, the corresponding range does not exist. */ /* * 0x0 to page_to_phys(low_mem_pgend)-1: * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) */ uint32_t low_mem_pgend; /* * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: * Reserved for special memory mappings */ uint32_t reserved_mem_pgstart; /* * 0x100000000 to page_to_phys(high_mem_pgend)-1: * RAM above 4GB */ uint32_t high_mem_pgend; /* Bitmap of which CPUs are online at boot time. */ uint8_t vcpu_online[HVM_MAX_VCPUS/8]; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/hvm_op.h000066400000000000000000000112771314037446600273050ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ #include "../xen.h" /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). 
*/ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/ioreq.h000066400000000000000000000100451314037446600271240ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
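/*
 * Illustrative sketch (not part of the Xen public headers): reading an HVM
 * parameter with HVMOP_get_param and struct xen_hvm_param from hvm_op.h
 * above.  It assumes the usual guest-side hypercall wrapper
 * HYPERVISOR_hvm_op(); the parameter indices themselves (for example
 * HVM_PARAM_STORE_EVTCHN) come from hvm/params.h later in this archive.
 */
static int example_hvm_get_param(uint32_t index, uint64_t *value)
{
    struct xen_hvm_param p;
    int rc;

    p.domid = DOMID_SELF;          /* query the calling domain itself */
    p.index = index;
    p.value = 0;

    rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);
    if (rc == 0)
        *value = p.value;
    return rc;
}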
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t data; /* data (or paddr of data) */ uint32_t count; /* for rep prefixes */ uint32_t size; /* size in bytes */ uint32_t vp_eport; /* evtchn for notifications to/from device model */ uint16_t _pad0; uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t _pad1:1; uint8_t type; /* I/O type */ }; typedef struct ioreq ioreq_t; struct shared_iopage { struct ioreq vcpu_ioreq[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. 
*/ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/params.h000066400000000000000000000076341314037446600273020ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. 
* no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/hvm/save.h000066400000000000000000000063451314037446600267530ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." 
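/*
 * Illustrative sketch (not part of the Xen public headers): packing the
 * HVM_PARAM_CALLBACK_IRQ value described in hvm/params.h above.  Bits
 * 63:56 select the delivery method (0 = GSI, 1 = PCI INTx); these helpers
 * only build the 64-bit value, they do not issue the HVMOP_set_param call.
 */
static inline uint64_t example_callback_via_gsi(uint32_t gsi)
{
    /* val[63:56] == 0, val[55:0] is the delivery GSI. */
    return (uint64_t)gsi;
}

static inline uint64_t example_callback_via_pci_intx(uint16_t seg, uint8_t bus,
                                                     uint8_t devfn, uint8_t intx)
{
    /* val[63:56] == 1; Domain = val[47:32], Bus = val[31:16],
     * DevFn = val[15:8], IntX = val[1:0]. */
    return ((uint64_t)1     << 56) |
           ((uint64_t)seg   << 32) |
           ((uint64_t)bus   << 16) |
           ((uint64_t)devfn <<  8) |
           ((uint64_t)(intx & 0x3));
}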
#endif /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. */ #define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; } #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/000077500000000000000000000000001314037446600254515ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/blkif.h000066400000000000000000000314261314037446600267170ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). 
*/ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. 
Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 #if 0 /* * NB. first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "sector-size" node in the backend * xenbus info. Also the xenbus "sectors" node is expressed in 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #endif struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). 
*/ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/cdromif.h000066400000000000000000000067321314037446600272550ustar00rootroot00000000000000/****************************************************************************** * cdromif.h * * Shared definitions between backend driver and Xen guest Virtual CDROM * block device. 
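/*
 * Illustrative sketch (not part of the Xen public headers): the number of
 * indirect grant pages a frontend needs for a BLKIF_OP_INDIRECT request,
 * following the ceil(indirect_segments / 512) rule described in io/blkif.h
 * above.  It assumes PAGE_SIZE == 4096, so one page holds
 * 4096 / sizeof(struct blkif_request_segment_aligned) == 512 entries.
 */
#define EXAMPLE_SEGS_PER_INDIRECT_PAGE \
    (4096 / sizeof(struct blkif_request_segment_aligned))   /* == 512 */

static inline unsigned int example_indirect_pages(unsigned int nr_segments)
{
    /* The result is further capped by the backend's
     * feature-max-indirect-segments value and by
     * BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST. */
    return (nr_segments + EXAMPLE_SEGS_PER_INDIRECT_PAGE - 1) /
           EXAMPLE_SEGS_PER_INDIRECT_PAGE;
}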
* * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_CDROMIF_H__ #define __XEN_PUBLIC_IO_CDROMIF_H__ /* * Queries backend for CDROM support */ #define XEN_TYPE_CDROM_SUPPORT _IO('c', 1) struct xen_cdrom_support { uint32_t type; int8_t ret; /* returned, 0 succeded, -1 error */ int8_t err; /* returned, backend errno */ int8_t supported; /* returned, 1 supported */ }; /* * Opens backend device, returns drive geometry or * any encountered errors */ #define XEN_TYPE_CDROM_OPEN _IO('c', 2) struct xen_cdrom_open { uint32_t type; int8_t ret; int8_t err; int8_t pad; int8_t media_present; /* returned */ uint32_t sectors; /* returned */ uint32_t sector_size; /* returned */ int32_t payload_offset; /* offset to backend node name payload */ }; /* * Queries backend for media changed status */ #define XEN_TYPE_CDROM_MEDIA_CHANGED _IO('c', 3) struct xen_cdrom_media_changed { uint32_t type; int8_t ret; int8_t err; int8_t media_changed; /* returned */ }; /* * Sends vcd generic CDROM packet to backend, followed * immediately by the vcd_generic_command payload */ #define XEN_TYPE_CDROM_PACKET _IO('c', 4) struct xen_cdrom_packet { uint32_t type; int8_t ret; int8_t err; int8_t pad[2]; int32_t payload_offset; /* offset to vcd_generic_command payload */ }; /* CDROM_PACKET_COMMAND, payload for XEN_TYPE_CDROM_PACKET */ struct vcd_generic_command { uint8_t cmd[CDROM_PACKET_SIZE]; uint8_t pad[4]; uint32_t buffer_offset; uint32_t buflen; int32_t stat; uint32_t sense_offset; uint8_t data_direction; uint8_t pad1[3]; int32_t quiet; int32_t timeout; }; union xen_block_packet { uint32_t type; struct xen_cdrom_support xcs; struct xen_cdrom_open xco; struct xen_cdrom_media_changed xcmc; struct xen_cdrom_packet xcp; }; #define PACKET_PAYLOAD_OFFSET (sizeof(struct xen_cdrom_packet)) #define PACKET_SENSE_OFFSET (PACKET_PAYLOAD_OFFSET + sizeof(struct vcd_generic_command)) #define PACKET_BUFFER_OFFSET (PACKET_SENSE_OFFSET + sizeof(struct request_sense)) #define MAX_PACKET_DATA (PAGE_SIZE - sizeof(struct xen_cdrom_packet) - \ sizeof(struct vcd_generic_command) - sizeof(struct request_sense)) #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/console.h000066400000000000000000000031271314037446600272670ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/fbif.h000066400000000000000000000126721314037446600265400ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. 
*/ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* offset of the framebuffer in bytes */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* * Framebuffer refresh period advice * Backend sends it to advise the frontend their preferred period of * refresh. Frontends that keep the framebuffer constantly up-to-date * just ignore it. Frontends that use the advice should immediately * refresh the framebuffer (and send an update notification event if * those have been requested), then use the update frequency to guide * their periodical refreshs. */ #define XENFB_TYPE_REFRESH_PERIOD 1 #define XENFB_NO_REFRESH 0 struct xenfb_refresh_period { uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */ uint32_t period; /* period of refresh, in ms, * XENFB_NO_REFRESH if no refresh is needed */ }; #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; struct xenfb_refresh_period refresh_period; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* the width of the framebuffer (in pixels) */ int32_t height; /* the height of the framebuffer (in pixels) */ uint32_t line_length; /* the length of a row of pixels (in bytes) */ uint32_t mem_length; /* the length of the framebuffer (in bytes) */ uint8_t depth; /* the depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs * 64 bit. 256 directories give enough room for a 512 Meg * framebuffer with a max resolution of 12,800x10,240. Should * be enough for a while with room leftover for expansion. */ #ifndef CONFIG_PARAVIRT_XEN unsigned long pd[256]; #else /* Two directory pages should be enough for a while. */ unsigned long pd[2]; #endif }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. 
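/*
 * Illustrative sketch (not part of the Xen public headers): draining the
 * backend->frontend event ring of the virtual framebuffer using the
 * XENFB_IN_RING_REF() accessor and the in_cons/in_prod indexes from
 * struct xenfb_page in io/fbif.h above.  A real frontend must insert the
 * appropriate memory barriers between reading in_prod, reading the events
 * and publishing the new in_cons; they are only marked here as comments.
 */
static void example_drain_xenfb_in_events(struct xenfb_page *page)
{
    uint32_t cons = page->in_cons;
    uint32_t prod = page->in_prod;

    /* rmb() here: read the events only after the producer index. */
    while (cons != prod) {
        union xenfb_in_event *event = &XENFB_IN_RING_REF(page, cons);

        if (event->type == XENFB_TYPE_REFRESH_PERIOD) {
            /* e.g. remember event->refresh_period.period and schedule
             * periodic refreshes; unknown in-event types are ignored,
             * as the header above requires. */
        }
        cons++;
    }
    /* mb() here: finish reading events before telling the backend. */
    page->in_cons = cons;
}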
*/ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/fsif.h000066400000000000000000000123761314037446600265620ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, . */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_write_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_stat_request { uint32_t fd; }; /* This structure is a copy of some fields from stat structure, returned * via the ring. 
*/ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t stat_ret; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; union { uint64_t ret_val; struct fsif_stat_response fstat; } u; }; typedef struct fsif_response fsif_response_t; #define FSIF_RING_ENTRY_SIZE 64 #define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ sizeof(grant_ref_t) + 1) #define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ sizeof(grant_ref_t) + 1) DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #define STATE_CLOSING "closing" #define STATE_CLOSED "closed" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/kbdif.h000066400000000000000000000074211314037446600267050ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/netif.h000066400000000000000000000171341314037446600267350ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. 
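/*
 * Illustrative sketch (not part of the Xen public headers): filling in the
 * first netif_tx_request of a two-fragment, checksum-offloaded packet,
 * following the wire format described in io/netif.h above.  The grant
 * reference, request id and total packet length are assumed to have been
 * prepared by the caller; pushing the requests onto the shared ring and
 * notifying the backend are omitted.
 */
static void example_fill_first_tx_slot(struct netif_tx_request *req,
                                       grant_ref_t gref, uint16_t offset,
                                       uint16_t id, uint16_t total_len)
{
    req->gref   = gref;        /* granted frame holding fragment 1          */
    req->offset = offset;      /* start of the data within that frame       */
    req->id     = id;          /* echoed back in netif_tx_response          */
    req->size   = total_len;   /* first slot conventionally carries the
                                * total packet length                       */
    req->flags  = NETTXF_csum_blank |     /* checksum left blank (offload)  */
                  NETTXF_data_validated |
                  NETTXF_more_data;       /* fragment 2 follows in the next
                                           * request slot, which ends the
                                           * chain with flags == 0          */
}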
*/ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #else #define xen_netif_tx_request netif_tx_request #define xen_netif_rx_request netif_rx_request #define xen_netif_tx_response netif_tx_response #define xen_netif_rx_response netif_rx_response DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); #define xen_netif_extra_info netif_extra_info #endif #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
*/ #define NETIF_RSP_NULL 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/pciif.h000066400000000000000000000074321314037446600267220ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) #define _XEN_PCIB_AERHANDLER (1) #define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) #define _XEN_PCIB_active (2) #define XEN_PCIB_active (1<<_XEN_PCIB_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) #define XEN_PCI_OP_enable_msi (2) #define XEN_PCI_OP_disable_msi (3) #define XEN_PCI_OP_enable_msix (4) #define XEN_PCI_OP_disable_msix (5) #define XEN_PCI_OP_aer_detected (6) #define XEN_PCI_OP_aer_resume (7) #define XEN_PCI_OP_aer_mmio (8) #define XEN_PCI_OP_aer_slotreset (9) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) /* * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) * Should not exceed 128 */ #define SH_INFO_MAX_VEC 128 struct xen_msix_entry { uint16_t vector; uint16_t entry; }; struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; /* IN: Contains extra infor for this operation */ uint32_t info; /*IN: param for msi-x */ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; }; /*used for pcie aer handling*/ struct xen_pcie_aer_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /*IN/OUT: return aer_op result or carry error_detected state as input*/ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment*/ uint32_t bus; uint32_t devfn; }; struct xen_pci_sharedinfo { /* 
flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; struct xen_pcie_aer_op aer_op; }; #endif /* __XEN_PCI_COMMON_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/protocols.h000066400000000000000000000032071314037446600276500ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/ring.h000066400000000000000000000352331314037446600265670ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. 
*/ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include "../xen-compat.h" #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) /* * The same for passing in an actual pointer instead of a name tag. */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t netfront_smartpoll_active; \ uint8_t pad[47]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. 
* * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. 
For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. */ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/tpmif.h000066400000000000000000000046251314037446600267500ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). */ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/usbif.h000066400000000000000000000105601314037446600267340ustar00rootroot00000000000000/* * usbif.h * * USB I/O interface for Xen guest OSes. * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ #define __XEN_PUBLIC_IO_USBIF_H__ #include "ring.h" #include "../grant_table.h" enum usb_spec_version { USB_VER_UNKNOWN = 0, USB_VER_USB11, USB_VER_USB20, USB_VER_USB30, /* not supported yet */ }; /* * USB pipe in usbif_request * * bits 0-5 are specific bits for virtual USB driver. * bits 7-31 are standard urb pipe. 
* * - port number(NEW): bits 0-4 * (USB_MAXCHILDREN is 31) * * - operation flag(NEW): bit 5 * (0 = submit urb, * 1 = unlink urb) * * - direction: bit 7 * (0 = Host-to-Device [Out] * 1 = Device-to-Host [In]) * * - device address: bits 8-14 * * - endpoint: bits 15-18 * * - pipe type: bits 30-31 * (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) */ #define usbif_pipeportnum(pipe) ((pipe) & 0x1f) #define usbif_setportnum_pipe(pipe, portnum) \ ((pipe)|(portnum)) #define usbif_pipeunlink(pipe) ((pipe) & 0x20) #define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) #define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) #define USBIF_BACK_MAX_PENDING_REQS (128) #define USBIF_MAX_SEGMENTS_PER_REQUEST (16) /* * RING for transferring urbs. */ struct usbif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; struct usbif_urb_request { uint16_t id; /* request id */ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ /* basic urb parameter */ uint32_t pipe; uint16_t transfer_flags; uint16_t buffer_length; union { uint8_t ctrl[8]; /* setup_packet (Ctrl) */ struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t start_frame; /* start frame */ uint16_t number_of_packets; /* number of ISO packet */ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ } isoc; struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t pad[3]; } intr; struct { uint16_t unlink_id; /* unlink request id */ uint16_t pad[3]; } unlink; } u; /* urb data segments */ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_urb_request usbif_urb_request_t; struct usbif_urb_response { uint16_t id; /* request id */ uint16_t start_frame; /* start frame (ISO) */ int32_t status; /* status (non-ISO) */ int32_t actual_length; /* actual transfer length */ int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_urb_response usbif_urb_response_t; DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response); #define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE) /* * RING for notifying connect/disconnect events to frontend */ struct usbif_conn_request { uint16_t id; }; typedef struct usbif_conn_request usbif_conn_request_t; struct usbif_conn_response { uint16_t id; /* request id */ uint8_t portnum; /* port number */ uint8_t speed; /* usb_device_speed */ }; typedef struct usbif_conn_response usbif_conn_response_t; DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response); #define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE) #endif /* __XEN_PUBLIC_IO_USBIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/vscsiif.h000066400000000000000000000070421314037446600272730ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. */ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include "ring.h" #include "../grant_table.h" /* command between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_BACK_MAX_PENDING_REQS 128 /* * Maximum scatter/gather segments per request. * * Considering balance between allocating al least 16 "vscsiif_request" * structures on one page (4096bytes) and number of scatter gather * needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* * base on linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; } seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; }; typedef struct vscsiif_request vscsiif_request_t; struct vscsiif_response { uint16_t rqid; uint8_t padding; uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/xenbus.h000066400000000000000000000044351314037446600271340ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. */ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/io/xs_wire.h000066400000000000000000000066111314037446600273060ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. 
*/ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/kexec.h000066400000000000000000000144501314037446600263160ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. 
* * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. */ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. 
* type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load 1 #define KEXEC_CMD_kexec_unload 2 typedef struct xen_kexec_load { int type; xen_kexec_image_t image; } xen_kexec_load_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ #define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap * Note that although this is adjacent * to Xen it exists in a separate EFI * region on ia64, and thus needs to be * inserted into iomem_machine separately */ #define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of * the ia64_boot_param */ #define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of * of the EFI Memory Map */ #define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... [in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/mem_event.h000066400000000000000000000034371314037446600272010ustar00rootroot00000000000000/****************************************************************************** * mem_event.h * * Memory event common structures. * * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _XEN_PUBLIC_MEM_EVENT_H #define _XEN_PUBLIC_MEM_EVENT_H #include "xen.h" #include "io/ring.h" /* Memory event notification modes */ #define MEM_EVENT_MODE_ASYNC 0 #define MEM_EVENT_MODE_SYNC (1 << 0) #define MEM_EVENT_MODE_SYNC_ALL (1 << 1) /* Memory event flags */ #define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) #define MEM_EVENT_FLAG_DOM_PAUSED (1 << 1) #define MEM_EVENT_FLAG_OUT_OF_MEM (1 << 2) typedef struct mem_event_shared_page { int port; } mem_event_shared_page_t; typedef struct mem_event_st { unsigned long gfn; unsigned long offset; unsigned long p2mt; int vcpu_id; uint64_t flags; } mem_event_request_t, mem_event_response_t; DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/memory.h000066400000000000000000000236111314037446600265260ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include "xen.h" /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. 
*/ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. */ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. 
*/ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; #define XENMAPIDX_grant_table_status 0x80000000 /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. 
*/ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; /* * Get the number of MFNs saved through memory sharing. * The call never fails. */ #define XENMEM_get_sharing_freed_pages 18 #endif /* __XEN_PUBLIC_MEMORY_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/nmi.h000066400000000000000000000053061314037446600260020ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ #include "xen.h" /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. 
*/ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/physdev.h000066400000000000000000000173201314037446600267000ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ #include "xen.h" /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. 
*/ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); #define PHYSDEVOP_setup_gsi 21 struct physdev_setup_gsi { int gsi; /* IN */ uint8_t triggering; /* IN */ uint8_t polarity; /* IN */ }; typedef struct physdev_setup_gsi physdev_setup_gsi_t; DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/platform.h000066400000000000000000000325711314037446600270470ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). 
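 *
 * Rough usage sketch (dom0 only; HYPERVISOR_platform_op() is assumed to be
 * the kernel's hypercall wrapper, not defined in this header):
 *
 *     struct xen_platform_op op;
 *     op.cmd = XENPF_read_memtype;
 *     op.interface_version = XENPF_INTERFACE_VERSION;
 *     op.u.read_memtype.reg = reg;
 *     if ( HYPERVISOR_platform_op(&op) == 0 )
 *         the range and type are in op.u.read_memtype.{mfn, nr_mfns, type}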
*/ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. */ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. 
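 *
 * Rough illustrative sketch (HYPERVISOR_platform_op(), cpumap and idletimes
 * are caller-side assumptions, not part of this interface):
 *
 *     struct xen_platform_op op = { 0 };
 *     op.cmd = XENPF_getidletime;
 *     op.interface_version = XENPF_INTERFACE_VERSION;
 *     set_xen_guest_handle(op.u.getidletime.cpumap_bitmap, cpumap);
 *     op.u.getidletime.cpumap_nr_cpus = nr_cpus;
 *     set_xen_guest_handle(op.u.getidletime.idletime, idletimes);
 *     if ( HYPERVISOR_platform_op(&op) == 0 )
 *         per-CPU idle times are in idletimes[], snapshot time in
 *         op.u.getidletime.now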
*/ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. */ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct 
xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ } u; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); #define XENPF_get_cpuinfo 55 struct xenpf_pcpuinfo { /* IN */ uint32_t xen_cpuid; /* OUT */ /* The maxium cpu_id that is present */ uint32_t max_present; #define XEN_PCPU_FLAGS_ONLINE 1 /* Correponding xen_cpuid is not present*/ #define XEN_PCPU_FLAGS_INVALID 2 uint32_t flags; uint32_t apic_id; uint32_t acpi_id; }; typedef struct xenpf_pcpuinfo xenpf_pcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_pcpuinfo_t); #define XENPF_cpu_online 56 #define XENPF_cpu_offline 57 struct xenpf_cpu_ol { uint32_t cpuid; }; typedef struct xenpf_cpu_ol xenpf_cpu_ol_t; DEFINE_XEN_GUEST_HANDLE(xenpf_cpu_ol_t); #define XENPF_cpu_hotadd 58 struct xenpf_cpu_hotadd { uint32_t apic_id; uint32_t acpi_id; uint32_t pxm; }; #define XENPF_mem_hotadd 59 struct xenpf_mem_hotadd { uint64_t spfn; uint64_t epfn; uint32_t pxm; uint32_t flags; }; #define XENPF_get_cpu_freq ('N' << 24) struct xenpf_get_cpu_freq { /* IN variables */ uint32_t vcpu; /* OUT variables */ uint32_t freq; /* in kHz */ }; struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; struct xenpf_pcpuinfo pcpu_info; struct xenpf_cpu_ol cpu_ol; struct xenpf_cpu_hotadd cpu_add; struct xenpf_mem_hotadd mem_add; struct xenpf_get_cpu_freq get_cpu_freq; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/sched.h000066400000000000000000000103501314037446600263000ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; DEFINE_GUEST_HANDLE_STRUCT(sched_poll); typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/sysctl.h000066400000000000000000000435651314037446600265510ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. 
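 *
 * Every operation below is issued through a single hypercall taking a
 * struct xen_sysctl whose cmd and interface_version fields select the
 * operation. Illustrative sketch only (HYPERVISOR_sysctl() and the buffers
 * are assumptions supplied by the caller, not defined in this header):
 *
 *     struct xen_sysctl sysctl = { 0 };
 *     sysctl.cmd = XEN_SYSCTL_getcpuinfo;
 *     sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     sysctl.u.getcpuinfo.max_cpus = max_cpus;
 *     set_xen_guest_handle(sysctl.u.getcpuinfo.info, info_array);
 *     if ( HYPERVISOR_sysctl(&sysctl) == 0 )
 *         the first sysctl.u.getcpuinfo.nr_cpus entries of info_array are valid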
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x00000007 /* * Read console content from Xen buffer ring. */ #define XEN_SYSCTL_readconsole 1 struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. */ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. */ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ #define XEN_SYSCTL_tbuf_op 2 struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_cpumap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ #define XEN_SYSCTL_physinfo 3 /* (x86) The platform supports HVM guests. */ #define _XEN_SYSCTL_PHYSCAP_hvm 0 #define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) /* (x86) The platform supports HVM-guest direct access to I/O devices. */ #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 #define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; uint32_t max_node_id; uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint32_t hw_cap[8]; /* * IN: maximum addressable entry in the caller-provided cpu_to_node array. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the cpu_to_node array is truncated! 
*/ uint32_t max_cpu_id; /* * If not NULL, this array is filled with node identifier for each cpu. * If a cpu has no node information (e.g., cpu not present) then the * sentinel value ~0u is written. * The size of this array is specified by the caller in @max_cpu_id. * If the actual @max_cpu_id is smaller than the array then the trailing * elements of the array will not be written by the sysctl. */ XEN_GUEST_HANDLE_64(uint32) cpu_to_node; /* XEN_SYSCTL_PHYSCAP_??? */ uint32_t capabilities; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ #define XEN_SYSCTL_sched_id 4 struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ #define XEN_SYSCTL_perfc_op 5 /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. */ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); #define XEN_SYSCTL_getdomaininfolist 6 struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ #define XEN_SYSCTL_debug_keys 7 struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ #define XEN_SYSCTL_getcpuinfo 8 struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); #define XEN_SYSCTL_availheap 9 struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
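 *
 * Illustrative example (typical usage, not normative): to ask how much free
 * heap the hypervisor has below 4GiB across all NUMA nodes, set
 * min_bitwidth = 0, max_bitwidth = 32, node = -1, issue
 * XEN_SYSCTL_availheap, and read the result back from avail_bytes.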
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); #define XEN_SYSCTL_get_pmstat 10 struct pm_px_val { uint64_aligned_t freq; /* Px core frequency */ uint64_aligned_t residency; /* Px residency time */ uint64_aligned_t count; /* Px transition count */ }; typedef struct pm_px_val pm_px_val_t; DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); struct pm_px_stat { uint8_t total; /* total Px states */ uint8_t usable; /* usable Px states */ uint8_t last; /* last Px state */ uint8_t cur; /* current Px state */ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; }; typedef struct pm_px_stat pm_px_stat_t; DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); struct pm_cx_stat { uint32_t nr; /* entry nr in triggers & residencies, including C0 */ uint32_t last; /* last Cx state */ uint64_aligned_t idle_time; /* idle time from boot */ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ }; struct xen_sysctl_get_pmstat { #define PMSTAT_CATEGORY_MASK 0xf0 #define PMSTAT_PX 0x10 #define PMSTAT_CX 0x20 #define PMSTAT_get_max_px (PMSTAT_PX | 0x1) #define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) #define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) #define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) #define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) #define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) uint32_t type; uint32_t cpuid; union { struct pm_px_stat getpx; struct pm_cx_stat getcx; /* other struct for tx, etc */ } u; }; typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); /* * Status codes. Must be greater than 0 to avoid confusing * sysctl callers that see 0 as a plain successful return. */ #define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1 #define XEN_CPU_HOTPLUG_STATUS_ONLINE 2 #define XEN_CPU_HOTPLUG_STATUS_NEW 3 #define XEN_SYSCTL_cpu_hotplug 11 struct xen_sysctl_cpu_hotplug { /* IN variables */ uint32_t cpu; /* Physical cpu. */ #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 #define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2 uint32_t op; /* hotplug opcode */ }; typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); /* * Get/set xen power management, include * 1. 
cpufreq governors and related parameters */ #define XEN_SYSCTL_pm_op 12 struct xen_userspace { uint32_t scaling_setspeed; }; typedef struct xen_userspace xen_userspace_t; struct xen_ondemand { uint32_t sampling_rate_max; uint32_t sampling_rate_min; uint32_t sampling_rate; uint32_t up_threshold; }; typedef struct xen_ondemand xen_ondemand_t; /* * cpufreq para name of this structure named * same as sysfs file name of native linux */ #define CPUFREQ_NAME_LEN 16 struct xen_get_cpufreq_para { /* IN/OUT variable */ uint32_t cpu_num; uint32_t freq_num; uint32_t gov_num; /* for all governors */ /* OUT variable */ XEN_GUEST_HANDLE_64(uint32) affected_cpus; XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; XEN_GUEST_HANDLE_64(char) scaling_available_governors; char scaling_driver[CPUFREQ_NAME_LEN]; uint32_t cpuinfo_cur_freq; uint32_t cpuinfo_max_freq; uint32_t cpuinfo_min_freq; uint32_t scaling_cur_freq; char scaling_governor[CPUFREQ_NAME_LEN]; uint32_t scaling_max_freq; uint32_t scaling_min_freq; /* for specific governor */ union { struct xen_userspace userspace; struct xen_ondemand ondemand; } u; }; struct xen_set_cpufreq_gov { char scaling_governor[CPUFREQ_NAME_LEN]; }; struct xen_set_cpufreq_para { #define SCALING_MAX_FREQ 1 #define SCALING_MIN_FREQ 2 #define SCALING_SETSPEED 3 #define SAMPLING_RATE 4 #define UP_THRESHOLD 5 uint32_t ctrl_type; uint32_t ctrl_value; }; /* Get physical CPU topology information. */ #define INVALID_TOPOLOGY_ID (~0U) struct xen_get_cputopo { /* IN: maximum addressable entry in * the caller-provided cpu_to_core/socket. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(uint32) cpu_to_core; XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; /* OUT: number of cpus returned * If OUT is greater than IN then the cpu_to_core/socket is truncated! */ uint32_t nr_cpus; }; struct xen_sysctl_pm_op { #define PM_PARA_CATEGORY_MASK 0xf0 #define CPUFREQ_PARA 0x10 /* cpufreq command type */ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04) /* get CPU topology */ #define XEN_SYSCTL_pm_op_get_cputopo 0x20 /* set/reset scheduler power saving option */ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21 /* cpuidle max_cstate access command */ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22 #define XEN_SYSCTL_pm_op_set_max_cstate 0x23 /* set scheduler migration cost value */ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24 #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25 uint32_t cmd; uint32_t cpuid; union { struct xen_get_cpufreq_para get_para; struct xen_set_cpufreq_gov set_gov; struct xen_set_cpufreq_para set_para; uint64_aligned_t get_avgfreq; struct xen_get_cputopo get_topo; uint32_t set_sched_opt_smt; uint32_t get_max_cstate; uint32_t set_max_cstate; uint32_t get_vcpu_migration_delay; uint32_t set_vcpu_migration_delay; } u; }; #define XEN_SYSCTL_page_offline_op 14 struct xen_sysctl_page_offline_op { /* IN: range of page to be offlined */ #define sysctl_page_offline 1 #define sysctl_page_online 2 #define sysctl_query_page_offline 3 uint32_t cmd; uint32_t start; uint32_t end; /* OUT: result of page offline request */ /* * bit 0~15: result flags * bit 16~31: owner */ XEN_GUEST_HANDLE(uint32) status; }; #define PG_OFFLINE_STATUS_MASK (0xFFUL) /* The result is invalid, i.e. 
HV does not handle it */ #define PG_OFFLINE_INVALID (0x1UL << 0) #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) #define PG_OFFLINE_FAILED (0x1UL << 3) #define PG_ONLINE_FAILED PG_OFFLINE_FAILED #define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED #define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) #define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) #define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) #define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) #define PG_OFFLINE_MISC_MASK (0xFFUL << 4) /* only valid when PG_OFFLINE_FAILED */ #define PG_OFFLINE_XENPAGE (0x1UL << 8) #define PG_OFFLINE_DOM0PAGE (0x1UL << 9) #define PG_OFFLINE_ANONYMOUS (0x1UL << 10) #define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) #define PG_OFFLINE_OWNED (0x1UL << 12) #define PG_OFFLINE_BROKEN (0x1UL << 13) #define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN #define PG_OFFLINE_OWNER_SHIFT 16 #define XEN_SYSCTL_lockprof_op 15 /* Sub-operations: */ #define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */ #define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */ /* Record-type: */ #define LOCKPROF_TYPE_GLOBAL 0 /* global lock, idx meaningless */ #define LOCKPROF_TYPE_PERDOM 1 /* per-domain lock, idx is domid */ #define LOCKPROF_TYPE_N 2 /* number of types */ struct xen_sysctl_lockprof_data { char name[40]; /* lock name (may include up to 2 %d specifiers) */ int32_t type; /* LOCKPROF_TYPE_??? */ int32_t idx; /* index (e.g. domain id) */ uint64_aligned_t lock_cnt; /* # of locking succeeded */ uint64_aligned_t block_cnt; /* # of wait for lock */ uint64_aligned_t lock_time; /* nsecs lock held */ uint64_aligned_t block_time; /* nsecs waited for lock */ }; typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t); struct xen_sysctl_lockprof_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_LOCKPROF_??? */ uint32_t max_elem; /* size of output buffer */ /* OUT variables (query only). */ uint32_t nr_elem; /* number of elements available */ uint64_aligned_t time; /* nsecs of profile measurement */ /* profile information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data; }; typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t); struct xen_sysctl { uint32_t cmd; uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; struct xen_sysctl_get_pmstat get_pmstat; struct xen_sysctl_cpu_hotplug cpu_hotplug; struct xen_sysctl_pm_op pm_op; struct xen_sysctl_page_offline_op page_offline; struct xen_sysctl_lockprof_op lockprof_op; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/tmem.h000066400000000000000000000110241314037446600261530ustar00rootroot00000000000000/****************************************************************************** * tmem.h * * Guest OS interface to Xen Transcendent Memory. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_TMEM_H__ #define __XEN_PUBLIC_TMEM_H__ #include "xen.h" /* Commands to HYPERVISOR_tmem_op() */ #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_NEW_PAGE 3 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 /* Privileged commands to HYPERVISOR_tmem_op() */ #define TMEM_AUTH 101 #define TMEM_RESTORE_NEW 102 /* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ #define TMEMC_THAW 0 #define TMEMC_FREEZE 1 #define TMEMC_FLUSH 2 #define TMEMC_DESTROY 3 #define TMEMC_LIST 4 #define TMEMC_SET_WEIGHT 5 #define TMEMC_SET_CAP 6 #define TMEMC_SET_COMPRESS 7 #define TMEMC_QUERY_FREEABLE_MB 8 #define TMEMC_SAVE_BEGIN 10 #define TMEMC_SAVE_GET_VERSION 11 #define TMEMC_SAVE_GET_MAXPOOLS 12 #define TMEMC_SAVE_GET_CLIENT_WEIGHT 13 #define TMEMC_SAVE_GET_CLIENT_CAP 14 #define TMEMC_SAVE_GET_CLIENT_FLAGS 15 #define TMEMC_SAVE_GET_POOL_FLAGS 16 #define TMEMC_SAVE_GET_POOL_NPAGES 17 #define TMEMC_SAVE_GET_POOL_UUID 18 #define TMEMC_SAVE_GET_NEXT_PAGE 19 #define TMEMC_SAVE_GET_NEXT_INV 20 #define TMEMC_SAVE_END 21 #define TMEMC_RESTORE_BEGIN 30 #define TMEMC_RESTORE_PUT_PAGE 32 #define TMEMC_RESTORE_FLUSH_PAGE 33 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_POOL_PAGESIZE_MASK 0xf #define TMEM_POOL_VERSION_SHIFT 24 #define TMEM_POOL_VERSION_MASK 0xff /* Bits for client flags (save/restore) */ #define TMEM_CLIENT_COMPRESS 1 #define TMEM_CLIENT_FROZEN 2 /* Special errno values */ #define EFROZEN 1000 #define EEMPTY 1001 #ifndef __ASSEMBLY__ typedef xen_pfn_t tmem_cli_mfn_t; typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; struct tmem_op { uint32_t cmd; int32_t pool_id; union { struct { uint64_t uuid[2]; uint32_t flags; uint32_t arg1; } new; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */ struct { uint32_t subop; uint32_t cli_id; uint32_t arg1; uint32_t arg2; uint64_t arg3; tmem_cli_va_t buf; } ctrl; /* for cmd == TMEM_CONTROL */ struct { uint64_t object; uint32_t index; uint32_t tmem_offset; uint32_t pfn_offset; uint32_t len; tmem_cli_mfn_t cmfn; /* client machine page frame */ } gen; /* for all other cmd ("generic") */ } u; }; typedef struct tmem_op tmem_op_t; DEFINE_XEN_GUEST_HANDLE(tmem_op_t); struct tmem_handle { uint32_t pool_id; uint32_t index; uint64_t 
oid; }; #endif #endif /* __XEN_PUBLIC_TMEM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/trace.h000066400000000000000000000226251314037446600263200ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ #define TRC_PM 0x0080f000 /* Xen power management trace */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ /* Trace events per class */ #define TRC_LOST_RECORDS (TRC_GEN + 1) #define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2) #define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3) #define TRC_TRACE_IRQ (TRC_GEN + 4) #define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1) #define TRC_SCHED_CONTINUE_RUNNING (TRC_SCHED_MIN + 2) #define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1) #define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2) #define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3) #define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4) #define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5) #define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6) #define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7) #define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8) #define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9) #define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10) #define 
TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11) #define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12) #define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13) #define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14) #define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15) #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) #define TRC_PV_PAGE_FAULT (TRC_PV + 4) #define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5) #define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6) #define TRC_PV_EMULATE_4GB (TRC_PV + 7) #define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8) #define TRC_PV_PAGING_FIXUP (TRC_PV + 9) #define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10) #define TRC_PV_PTWR_EMULATION (TRC_PV + 11) #define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12) /* Indicates that addresses in trace record are 64 bits */ #define TRC_64_FLAG (0x100) #define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1) #define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2) #define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3) #define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4) #define TRC_SHADOW_MMIO (TRC_SHADOW + 5) #define TRC_SHADOW_FIXUP (TRC_SHADOW + 6) #define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7) #define TRC_SHADOW_EMULATE (TRC_SHADOW + 8) #define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9) #define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10) #define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11) #define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12) #define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13) #define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14) #define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15) /* trace events per subclass */ #define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01) #define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02) #define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02) #define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01) #define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01) #define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02) #define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02) #define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03) #define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04) #define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05) #define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06) #define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07) #define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08) #define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08) #define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09) #define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09) #define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A) #define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B) #define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C) #define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D) #define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E) #define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F) #define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10) #define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11) #define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12) #define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13) #define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14) #define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14) #define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15) #define TRC_HVM_IOPORT_READ (TRC_HVM_HANDLER + 0x16) #define TRC_HVM_IOMEM_READ (TRC_HVM_HANDLER + 0x17) #define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18) #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19) #define TRC_HVM_INTR_WINDOW 
(TRC_HVM_HANDLER + 0x20) #define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) #define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) /* trace subclasses for power management */ #define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */ #define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */ /* trace events for per class */ #define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01) #define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01) #define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02) /* This structure represents a single trace buffer record. */ struct t_rec { uint32_t event:28; uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */ union { struct { uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */ uint32_t extra_u32[7]; /* event data items */ } cycles; struct { uint32_t extra_u32[7]; /* event data items */ } nocycles; } u; }; /* * This structure contains the metadata for a single trace buffer. The head * field, indexes into an array of struct t_rec's. */ struct t_buf { /* Assume the data buffer size is X. X is generally not a power of 2. * CONS and PROD are incremented modulo (2*X): * 0 <= cons < 2*X * 0 <= prod < 2*X * This is done because addition modulo X breaks at 2^32 when X is not a * power of 2: * (((2^32 - 1) % X) + 1) % X != (2^32) % X */ uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. */ /* Records follow immediately after the meta-data header. */ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/vcpu.h000066400000000000000000000227101314037446600261720ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ #include "xen.h" /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. 
* * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. */ uint64_t time[4]; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info); typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical or guest handle, * depending on the platform. Virtual address or guest handle should be * registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. 
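 *
 * Illustrative sketch only (HYPERVISOR_vcpu_op() is assumed to be the
 * guest's hypercall wrapper): to request a 10ms periodic timer on VCPU
 * vcpu_id and later stop it again:
 *
 *     struct vcpu_set_periodic_timer t = { .period_ns = 10 * 1000 * 1000 };
 *     HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, vcpu_id, &t);
 *     ...
 *     HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, vcpu_id, NULL);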
*/ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer); typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */ uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer); typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. */ #define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info); typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 /* * Get the physical ID information for a pinned vcpu's underlying physical * processor. The physical ID informmation is architecture-specific. * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and * greater are reserved. * This command returns -EINVAL if it is not a valid operation for this VCPU. */ #define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ struct vcpu_get_physid { uint64_t phys_id; }; typedef struct vcpu_get_physid vcpu_get_physid_t; DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); #define xen_vcpu_physid_to_x86_apicid(physid) \ ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid))) #define xen_vcpu_physid_to_x86_acpiid(physid) \ ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32))) /* * Register a memory location to get a secondary copy of the vcpu time * parameters. The master copy still exists as part of the vcpu shared * memory area, and this secondary copy is updated whenever the master copy * is updated (and using the same versioning scheme for synchronisation). * * The intent is that this copy may be mapped (RO) into userspace so * that usermode can compute system time using the time info and the * tsc. Usermode will see an array of vcpu_time_info structures, one * for each vcpu, and choose the right one by an existing mechanism * which allows it to get the current vcpu number (such as via a * segment limit). It can then apply the normal algorithm to compute * system time from the tsc. * * @extra_arg == pointer to vcpu_register_time_info_memory_area structure. 
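 *
 * Illustrative sketch only (HYPERVISOR_vcpu_op() is the assumed guest
 * hypercall wrapper and ti points at a guest-allocated struct
 * vcpu_time_info for this VCPU):
 *
 *     struct vcpu_register_time_memory_area area = { .addr.v = ti };
 *     HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, vcpu_id, &area);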
*/ #define VCPUOP_register_vcpu_time_memory_area 13 DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t); struct vcpu_register_time_memory_area { union { XEN_GUEST_HANDLE(vcpu_time_info_t) h; struct vcpu_time_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t); #endif /* __XEN_PUBLIC_VCPU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/version.h000066400000000000000000000061561314037446600267100ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; struct xen_extraversion { xen_extraversion_t extraversion; }; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; struct xen_capabilities_info { xen_capabilities_info_t info; }; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; struct xen_changeset_info { xen_changeset_info_t info; }; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. 
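 *
 * As an illustrative sketch (HYPERVISOR_xen_version() being the assumed
 * guest hypercall wrapper), a guest can read back its own handle with:
 *     xen_domain_handle_t handle;
 *     if ( HYPERVISOR_xen_version(XENVER_guest_handle, handle) == 0 )
 *         handle[] now holds the 16-byte domain handle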
*/ #define XENVER_guest_handle 8 #define XENVER_commandline 9 typedef char xen_commandline_t[1024]; #endif /* __XEN_PUBLIC_VERSION_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xen-compat.h000066400000000000000000000036361314037446600272760ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x0003020a #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xen.h000066400000000000000000000672511314037446600260200ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #ifdef CONFIG_PARAVIRT_XEN #include #endif #if defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #elif defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(uint64_t); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op_new 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 #define __HYPERVISOR_tmem_op 38 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #else #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_new #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. 
*/ #if __XEN_INTERFACE_VERSION__ < 0x00030204 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ #define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */ #define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * HYPERVISOR_mmu_update(reqs, count, pdone, foreigndom) * * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). * @count is the length of the above array. * @pdone is an output parameter indicating number of completed operations * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this * hypercall invocation. Can be DOMID_SELF. * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced * in this hypercall invocation. The value of this field * (x) encodes the PFD as follows: * x == 0 => PFD == DOMID_SELF * x != 0 => PFD == x - 1 * * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. * ------------- * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table belonging to PFD. If updating an L1 table, * and the new table entry is valid/present, the mapped frame must belong to * FD. If attempting to map an I/O page then the caller assumes the privilege * of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. 
* * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. 
Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * DOMID_COW is used as the owner of sharable pages */ #define DOMID_COW (0x7FF3U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; DEFINE_GUEST_HANDLE_STRUCT(mmu_update); typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op; #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) unsigned long result; #else long result; #endif unsigned long args[6]; }; DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. 
It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; #ifdef CONFIG_PARAVIRT_XEN struct pvclock_vcpu_time_info time; #else struct vcpu_time_info time; #endif }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. 
Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ #ifdef CONFIG_PARAVIRT_XEN struct pvclock_wall_clock wc; #else uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ #endif struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. 
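 *
 * A short sketch of what the compatibility macros below provide, assuming
 * "xen_start_info" points at this guest's start_info_t (the variable name
 * is illustrative):
 *
 *     xen_pfn_t console_pfn = xen_start_info->console.domU.mfn;
 *
 * Guests built with __XEN_INTERFACE_VERSION__ < 0x00030203 can keep using
 * the older field names, which the macros map onto the union:
 *
 *     xen_pfn_t console_pfn = xen_start_info->console_mfn;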
*/ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ /* * A multiboot module is a package containing modules very similar to a * multiboot module array. The only differences are: * - the array of module descriptors is by convention simply at the beginning * of the multiboot module, * - addresses in the module descriptors are based on the beginning of the * multiboot module, * - the number of modules is determined by a termination descriptor that has * mod_start == 0. * * This permits to both build it statically and reference it in a configuration * file, and let the PV guest easily rebase the addresses to virtual addresses * and at the same time count the number of modules. */ struct xen_multiboot_mod_list { /* Address of first byte of the module */ uint32_t mod_start; /* Address of last byte of the module (inclusive) */ uint32_t mod_end; /* Address of zero-terminated command line */ uint32_t cmdline; /* Unused, must be zero */ uint32_t pad; }; typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xencomm.h000066400000000000000000000032131314037446600266600ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xenoprof.h000066400000000000000000000100311314037446600270460ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. 
* Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). */ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 #define XENOPROF_last_op 15 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE ~0UL /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xsm/000077500000000000000000000000001314037446600256515ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xsm/acm.h000066400000000000000000000156251314037446600265730ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 4 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 #define ACMHOOK_conflictset 3 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. * * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. 
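 *
 * A small illustrative example (hypothetical type numbers, not taken from
 * any real policy): if types 3 and 5 form a conflict set, starting a
 * domain whose ssidref includes type 3 increments running_types[3] and
 * conflict_aggregate_set[5]; while that count is non-zero, a domain whose
 * ssidref includes type 5 conflicts and may not be started. Destroying
 * the first domain decrements the counts again.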
*/ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* size of the SHA1 hash identifying the XML policy from which the binary policy was created */ #define ACM_SHA1_HASH_SIZE 20 /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xsm/acm_ops.h000066400000000000000000000104741314037446600274510ustar00rootroot00000000000000/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/interface/xsm/flask_op.h000066400000000000000000000024671314037446600276310ustar00rootroot00000000000000/* * This file contains the flask_op hypercall commands and definitions. * * Author: George Coker, * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. 
*/ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 #define FLASK_ADD_OCONTEXT 21 #define FLASK_DEL_OCONTEXT 22 #define FLASK_LAST FLASK_DEL_OCONTEXT typedef struct flask_op { uint32_t cmd; uint32_t size; char *buf; } flask_op_t; DEFINE_XEN_GUEST_HANDLE(flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/page.h000066400000000000000000000000321314037446600241620ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/pcifront.h000066400000000000000000000026601314037446600251030ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ #include struct pcifront_device; struct pci_bus; #define pcifront_sd pci_sysdata static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { } #else /* __ia64__ */ #include #include #define pcifront_sd pci_controller extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { xen_pcibios_setup_root_windows(bus, sd); } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/public/000077500000000000000000000000001314037446600243605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/public/evtchn.h000066400000000000000000000056351314037446600260310ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. 
* * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/public/gntdev.h000066400000000000000000000107401314037446600260220ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. 
* * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. */ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. 
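 *
 * A minimal sketch of the typical map/unmap sequence, assuming "fd" is a
 * file descriptor for the gntdev device node and "remote_domid"/"gref"
 * identify a grant offered by another domain (all three names are
 * illustrative; error handling is omitted):
 *
 *     struct ioctl_gntdev_map_grant_ref map;
 *     map.count = 1;
 *     map.refs[0].domid = remote_domid;
 *     map.refs[0].ref   = gref;
 *     ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);
 *     addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, map.index);
 *     ... use the mapped page, then munmap(addr, 4096) ...
 *     struct ioctl_gntdev_unmap_grant_ref unmap;
 *     unmap.index = map.index;
 *     unmap.count = 1;
 *     ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);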
*/ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; /* * Sets the maximum number of grants that may mapped at once by this gntdev * instance. * * N.B. This must be called before any other ioctl is performed on the device. */ #define IOCTL_GNTDEV_SET_MAX_GRANTS \ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ uint32_t count; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/public/privcmd.h000066400000000000000000000052141314037446600261770ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. 
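 *
 * A minimal usage sketch, assuming "fd" is a file descriptor opened on the
 * privcmd node (/proc/xen/privcmd) with sufficient privilege; the example
 * issues the XENVER_version hypercall defined in xen.h/version.h:
 *
 *     privcmd_hypercall_t call = { 0 };
 *     call.op     = __HYPERVISOR_xen_version;
 *     call.arg[0] = XENVER_version;
 *     ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *     ... on success, ver holds the Xen major:minor version (16:16) ...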
*/ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/public/xenbus.h000066400000000000000000000036731314037446600260460ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Interface to /proc/xen/xenbus. * * Copyright (c) 2008, Diego Ongaro * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __LINUX_PUBLIC_XENBUS_H__ #define __LINUX_PUBLIC_XENBUS_H__ #include typedef struct xenbus_alloc { domid_t dom; __u32 port; __u32 grant_ref; } xenbus_alloc_t; /* * @cmd: IOCTL_XENBUS_ALLOC * @arg: &xenbus_alloc_t * Return: 0, or -1 for error */ #define IOCTL_XENBUS_ALLOC \ _IOC(_IOC_NONE, 'X', 0, sizeof(xenbus_alloc_t)) #endif /* __LINUX_PUBLIC_XENBUS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/sysctl.h000066400000000000000000000002551314037446600245760ustar00rootroot00000000000000#ifndef _XEN_SYSCTL_H #define _XEN_SYSCTL_H /* CTL_XEN names: */ enum { CTL_XEN_INDEPENDENT_WALLCLOCK=1, CTL_XEN_PERMITTED_CLOCK_JITTER=2, }; #endif /* _XEN_SYSCTL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xen-ops.h000066400000000000000000000005351314037446600246470ustar00rootroot00000000000000#ifndef INCLUDE_XEN_OPS_H #define INCLUDE_XEN_OPS_H #include DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_pre_suspend(void); void xen_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); #endif /* INCLUDE_XEN_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xen_proc.h000066400000000000000000000004121314037446600250650ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry_uvp( const char *name, mode_t mode); extern void remove_xen_proc_entry_uvp( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xenbus.h000066400000000000000000000277631314037446600245760ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. 
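 * This is a store path such as "memory/target", supplied by the caller;
 * see register_xenbus_watch_uvp() and the xenbus_watch_path_uvp() helpers
 * below for who owns and frees the string.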
*/ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* See XBWF_ definitions below. */ unsigned long flags; #endif }; #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 #endif /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { const char *name; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int __must_check __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_frontend(struct xenbus_driver *drv) { return __xenbus_register_frontend_uvp(drv, THIS_MODULE, KBUILD_MODNAME); } int __must_check __xenbus_register_backend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_backend(struct xenbus_driver *drv) { return __xenbus_register_backend_uvp(drv, THIS_MODULE, KBUILD_MODNAME); } void xenbus_unregister_driver_uvp(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm_uvp(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start_uvp(struct xenbus_transaction *t); int xenbus_transaction_end_uvp(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) 
__attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather_uvp(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier_uvp(struct notifier_block *nb); void unregister_xenstore_notifier_uvp_uvp(struct notifier_block *nb); int register_xenbus_watch_uvp(struct xenbus_watch *watch); void unregister_xenbus_watch_uvp_uvp(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend_uvp(void); void xenbus_resume_uvp(void); void xenbus_suspend_uvp_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path_uvp(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #if defined(CONFIG_XEN_no) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path_uvp2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #else int xenbus_watch_path_uvpfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) __attribute__ ((format (printf, 4, 5))); #endif /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state_uvp(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. 
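 *
 * A frontend typically chains the helpers below roughly as in this sketch
 * (illustrative only: the variable names, the xenstore keys and the
 * convention that the grant call hands back the new grant reference are
 * assumptions borrowed from common Xen frontends, not guarantees made by
 * this header):
 *
 *   ref = xenbus_grant_ring_uvp(dev, virt_to_mfn(ring_page));
 *   if (ref < 0)
 *           return ref;          the helper has already reported the error
 *   xenbus_alloc_evtchn_uvp(dev, &evtchn);
 *   xenbus_printf_uvp(XBT_NIL, dev->nodename, "ring-ref", "%d", ref);
 *   xenbus_printf_uvp(XBT_NIL, dev->nodename, "event-channel", "%u", evtchn);
 *   xenbus_switch_state_uvp(dev, XenbusStateInitialised);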
*/ int xenbus_grant_ring_uvp(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp_valloc_uvp allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring_uvp_uvp does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_uvp_uvp_vfree_uvp if you mapped in your memory with * xenbus_map_ring_uvp_uvp_valloc_uvp (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring_uvp_uvp(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn_uvp(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn_uvp(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_uvp_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error_uvp(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error_uvp(dev, err, fmt, args), followed by * xenbus_switch_state_uvp(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal_uvp(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate_uvp(enum xenbus_state state); int xenbus_dev_is_online_uvp(struct xenbus_device *dev); int xenbus_frontend_closed_uvp(struct xenbus_device *dev); int xenbus_for_each_backend_uvp(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend_uvp(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xencomm.h000066400000000000000000000050611314037446600247230ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. */ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); #if 0 #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ struct xencomm_mini xc_desc ## _base[(n)] \ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ struct xencomm_mini *xc_desc = &xc_desc ## _base[0]; #else /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #endif #define xencomm_map_no_alloc(ptr, bytes) \ ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xencons.h000066400000000000000000000007141314037446600247320ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. */ void xencons_rx(char *buf, unsigned len); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/include/xens/xenoprof.h000066400000000000000000000025701314037446600251170ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN_no #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/version.ini000066400000000000000000000000641314037446600226700ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/000077500000000000000000000000001314037446600227205ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/Kbuild000066400000000000000000000002551314037446600240570ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/Makefile000066400000000000000000000000661314037446600243620ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/balloon.c000066400000000000000000000557531314037446600245310ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN_no /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
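 * Whether the page's zone was previously unpopulated is OR-ed into
 * @was_empty, so that increase_reservation() can rebuild the zonelists
 * after handing freshly repopulated pages back to the allocator.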
*/ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. */ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN_no #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN_no #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN_no /* Link back into the page tables if not highmem. 
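 * Only lowmem pages sit in the kernel's permanent direct mapping, so only
 * they have a kernel PTE that must be rewritten to point at the machine
 * frame just returned to us; highmem pages are mapped on demand via kmap()
 * and need no fix-up here.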
*/ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN_no ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES_uvp else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN_no /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! 
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write_uvp(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); //if (!need_sleep) //{ if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write_uvp(XBT_NIL, "memory", "target", buffer); } xenbus_write_uvp(XBT_NIL, "control/uvp", "Balloon_flag", "0"); //} /* Schedule more work if there is some still to be done. */ //if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; schedule_work(&balloon_worker); } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf_uvp(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
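 * For example, with 4 KiB pages (PAGE_SHIFT == 12) a memory/target value of
 * 1048576 KiB (1 GiB) becomes 1048576 >> 2 == 262144 pages.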
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch_uvp(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN_no)*/ #if !defined(CONFIG_XEN_no) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN_no pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry_uvp("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN_no) /* Initialise the balloon with excess memory space. 
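 * Pages between xen_start_info->nr_pages and max_pfn have pseudo-physical
 * frame numbers but no machine memory behind them at boot, so mark them
 * reserved, invalidate their p2m entries and park them in the balloon until
 * the domain is ballooned up.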
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance_uvp_uvp(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance_uvp_uvp); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN_no static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec_uvp(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN_no ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
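 * Without XENFEAT_auto_translated_physmap the frame could only be released
 * by rewriting the kernel PTE (dealloc_pte_fn() in CONFIG_XEN_no builds),
 * which is not compiled here, so signal failure and let the caller free the
 * page and bail out.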
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN_no flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec_uvp); #endif /* CONFIG_XEN_BACKEND */ static void _free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], !free_vec); } if (!free_vec) totalram_pages = bs.current_pages -= nr_pages; balloon_unlock(flags); if (free_vec) kfree(pagevec); schedule_work(&balloon_worker); } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec_uvp(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec_uvp(pagevec, nr_pages, 1); } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec_uvp); #endif /* CONFIG_XEN_BACKEND */ void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec_uvp(pagevec, nr_pages, 0); } void balloon_release_driver_page_uvp_uvp(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page_uvp_uvp); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/common.h000066400000000000000000000043741314037446600243710ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/scrub.c000066400000000000000000000010461314037446600242030ustar00rootroot00000000000000#include #include #include void scrub_pages(void *v, unsigned int count) { if (likely(cpu_has_xmm2)) { unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4); for (; n--; v += sizeof(long) * 4) asm("movnti %1,(%0)\n\t" "movnti %1,%c2(%0)\n\t" "movnti %1,2*%c2(%0)\n\t" "movnti %1,3*%c2(%0)\n\t" : : "r" (v), "r" (0L), "i" (sizeof(long)) : "memory"); asm volatile("sfence" : : : "memory"); } else for (; count--; v += PAGE_SIZE) clear_page(v); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-balloon/sysfs.c000066400000000000000000000127521314037446600242420ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
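/*
 * Usage note (illustrative): with the "xen_memory" sysdev class registered
 * above and device id 0, the balloon target is normally reachable from
 * userspace as
 *
 *   /sys/devices/system/xen_memory/xen_memory0/target_kb
 *   /sys/devices/system/xen_memory/xen_memory0/target
 *
 * e.g. writing 524288 to target_kb asks the driver to balloon the guest to
 * 512 MiB. Treat the exact path as an assumption; it follows the usual
 * sysdev layout (/sys/devices/system/<class>/<class><id>) rather than
 * anything this file states explicitly.
 */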
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/000077500000000000000000000000001314037446600236675ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600250260ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/Makefile000066400000000000000000000000661314037446600253310ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/evtchn.c000066400000000000000000000213201314037446600253200ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler_uvp()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler_uvp(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler_uvp); void notify_remote_via_irq_uvp(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq_uvp); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id 
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/features.c000066400000000000000000000015041314037446600256510ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features_uvp[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. 
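 *
 * For context, xen_setup_features() below asks the hypervisor for each
 * 32-bit submap via XENVER_get_features and expands every bit into one
 * byte of this array, so a feature test (assuming the usual xen_feature()
 * helper, which simply indexes the array) is a plain load, e.g.:
 *
 *   if (xen_feature(XENFEAT_auto_translated_physmap))
 *           ... skip the phys-to-machine bookkeeping ...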
*/ EXPORT_SYMBOL(xen_features_uvp); void xen_setup_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features_uvp[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access_uvp(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_uvp); void gnttab_grant_foreign_access_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_uvp_ref); int gnttab_query_foreign_access_uvp(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & 
(GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access_uvp); int gnttab_end_foreign_access_uvp_ref_uvp(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_uvp_ref_uvp); void gnttab_end_foreign_access_uvp(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_uvp_ref_uvp(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_uvp); int gnttab_grant_foreign_transfer_uvp(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_uvp_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_uvp); void gnttab_grant_foreign_transfer_uvp_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_uvp_ref); unsigned long gnttab_end_foreign_transfer_uvp_ref_uvp(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
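 * Note: the rmb() below keeps the load of shared[ref].frame from being
 * reordered before the GTF_transfer_completed check above. The granting
 * side is expected to publish the frame before setting the completion
 * flag, so this read barrier is what makes the BUG_ON(frame == 0) check
 * meaningful.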
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_uvp_ref_uvp); unsigned long gnttab_end_foreign_transfer_uvp(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_uvp_ref_uvp(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_uvp); void gnttab_free_grant_reference_uvp(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_uvp); void gnttab_free_grant_reference_uvps(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_uvps); int gnttab_alloc_grant_references_uvp(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references_uvp); int gnttab_empty_grant_references_uvp(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references_uvp); int gnttab_claim_grant_reference_uvp(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference_uvp); void gnttab_release_grant_reference_uvp(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference_uvp); void gnttab_request_free_callback_uvp(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback_uvp); void gnttab_cancel_free_callback_uvp(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback_uvp); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * 
nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN_no static DEFINE_SEQLOCK(gnttab_dma_lock); #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page_uvp(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page_uvp(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
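 * A foreign page that is still DMA-mapped has had its mapcount raised by
 * __gnttab_dma_map_page() (further down in this file), so its MFN must
 * not be swapped out from under the device; in that case we back off
 * with -EBUSY and leave the original grant untouched.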
*/ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page_uvp); void gnttab_reset_grant_page_uvp(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page_uvp); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. 
#endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ static int gnttab_resume(struct sys_device *dev) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } #define gnttab_resume() gnttab_resume(NULL) #ifdef CONFIG_PM_SLEEP #ifdef CONFIG_X86 static int gnttab_suspend(struct sys_device *dev, pm_message_t state) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static struct sysdev_class gnttab_sysclass = { .name = "gnttab", .resume = gnttab_resume, .suspend = gnttab_suspend, }; static struct sys_device device_gnttab = { .id = 0, .cls = &gnttab_sysclass, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN_no) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) { int err = sysdev_class_register(&gnttab_sysclass); if (!err) err = sysdev_register(&device_gnttab); if (err) return err; } #endif nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
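 * As a rough worked example (assuming 4 KiB pages, the 8-byte v1
 * grant_entry_t and a 4-byte grant_ref_t):
 *   ENTRIES_PER_GRANT_FRAME = 4096 / 8 = 512
 *   RPP                     = 4096 / 4 = 1024
 *   nr_freelist_frames(n)   = (n * 512 + 1023) / 1024
 * so one freelist page tracks the references of two grant frames.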
*/ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN_no) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_UNUSED1) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN_no core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/machine_reboot.c000066400000000000000000000043161314037446600270150ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. */ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend_uvp(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. 
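 * The ordering below matters: the platform IRQ is masked first, the APs
 * are then parked in ap_suspend() and counted through nr_spinning, and
 * only then does the boot CPU call HYPERVISOR_suspend() via bp_suspend().
 * On return, shared info, grant tables and event channels are restored
 * (platform_pci_resume, gnttab_resume, irq_resume) before the APs are
 * released and the IRQ is re-enabled.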
*/ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_uvp_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume_uvp(); } else xenbus_suspend_uvp_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/panic-handler.c000066400000000000000000000016561314037446600265500ustar00rootroot00000000000000#include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... */ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/platform-compat.c000066400000000000000000000067611314037446600271520ustar00rootroot00000000000000#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. 
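 * This compat helper is what the hypercall setup code relies on later,
 * e.g. (sketch taken from init_hypercall_stubs() in platform-pci.c):
 *
 *   pfn = vmalloc_to_pfn((char *)hypercall_stubs + i * PAGE_SIZE);
 *   wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);
 *
 * i.e. each page of the vmalloc'd hypercall area is resolved to a PFN
 * and registered with the hypervisor through the MSR advertised by the
 * Xen CPUID leaves.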
*/ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600277450ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. * Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). 
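 * The actual unplug request is the single outw() at the end of this
 * function: the chosen UNPLUG_* bits are written to XEN_IOPORT_UNPLUG
 * (I/O port 0x10). For instance, unplugging the emulated NICs and IDE
 * disks writes UNPLUG_ALL_NICS | UNPLUG_ALL_IDE_DISKS (= 0x3), while a
 * mask containing UNPLUG_IGNORE suppresses the write altogether.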
*/ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/platform-pci.c000066400000000000000000000240671314037446600264410ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. 
driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write_uvp(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features_uvp(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret 
= xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/platform-pci.h000066400000000000000000000026611314037446600264420ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/reboot.c000066400000000000000000000177531314037446600253420ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN_no sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN_no */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); #ifdef DEBIAN5 err = set_cpus_allowed(current, cpumask_of_cpu(0)); #else err = set_cpus_allowed_ptr(current, cpumask_of(0)); #endif if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. 
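 * The update is lock-free: the cmpxchg() loop below retries until it
 * reads a stable value, bails out if another path already moved
 * shutting_down to an active state, and only the INVALID -> active
 * transition schedules shutdown_work; a RESUMING -> active transition is
 * instead picked up by xen_suspend() when it finishes.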
*/ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; str = (char *)xenbus_read_uvp(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end_uvp(xbt, 1); return; } /*պǨʱsuspendʧᵼǨƹȥsuspendʧܺǨƳʱ*/ //xenbus_write_uvp(xbt, "control", "shutdown", ""); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; if (!xenbus_scanf_uvp(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end_uvp(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf_uvp(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler_uvp(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write_uvp(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int 
setup_shutdown_watcher(void) { int err; xenbus_scanf_uvp(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch_uvp(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch_uvp(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN_no static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN_no) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN_no) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/reboot_64.c000066400000000000000000000174641314037446600256520ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN_no sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
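 * xen_resume_notifier() stores the value returned by HYPERVISOR_suspend()
 * here so xen_suspend() can tell the two outcomes apart: after a
 * cancelled suspend the old suspend event-channel binding is kept, after
 * a completed one setup_suspend_evtchn() is called again to re-register
 * it.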
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; str = (char *)xenbus_read_uvp(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end_uvp(xbt, 1); return; } xenbus_write_uvp(xbt, "control", "shutdown", ""); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start_uvp(&xbt); if (err) return; if (!xenbus_scanf_uvp(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end_uvp(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf_uvp(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end_uvp(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler_uvp(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write_uvp(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf_uvp(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch_uvp(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch_uvp(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN_no static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier_uvp(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN_no) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN_no) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xen_proc.c000066400000000000000000000010601314037446600256450ustar00rootroot00000000000000 #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry_uvp(const char *name, mode_t 
mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", NULL)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry_uvp); void remove_xen_proc_entry_uvp(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xen_support.c000066400000000000000000000041211314037446600264170ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance_uvp_uvp(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance_uvp_uvp); void balloon_release_driver_page_uvp_uvp(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page_uvp_uvp); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_backend_client.c000066400000000000000000000113351314037446600303470ustar00rootroot00000000000000/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. 
* * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp_valloc_uvp); int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp_vfree_uvp); int xenbus_unmap_ring_uvp_uvp(struct 
xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp); int xenbus_dev_is_online_uvp(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online_uvp); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_client.c000066400000000000000000000430341314037446600267010ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate_uvp(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? 
name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate_uvp); /** * xenbus_watch_path_uvp - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path_uvp(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch_uvp(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal_uvp(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvp); #if defined(CONFIG_XEN_no) || defined(MODULE) int xenbus_watch_path_uvp2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path_uvp(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvp2); #else /** * xenbus_watch_path_uvpfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path_uvpfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path_uvp(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path_uvpfmt); #endif /** * xenbus_switch_state_uvp * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state_uvp(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. 
Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf_uvp(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal_uvp(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state_uvp); int xenbus_frontend_closed_uvp(struct xenbus_device *dev) { xenbus_switch_state_uvp(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed_uvp); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write_uvp(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error_uvp * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error_uvp(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error_uvp); /** * xenbus_dev_fatal_uvp * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error_uvp(dev, err, fmt, args), followed by * xenbus_switch_state_uvp(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal_uvp(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state_uvp(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal_uvp); /** * xenbus_grant_ring_uvp * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. 
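 *
 * (As the body below shows, the non-negative value handed back on success
 * is the grant reference obtained from gnttab_grant_foreign_access_uvp,
 * so callers can advertise it to the peer directly.)
 *
 * A minimal frontend-side usage sketch; the ring page and the
 * virt_to_mfn() helper are illustrative assumptions, not something this
 * file defines:
 *
 *	unsigned long ring = get_zeroed_page(GFP_KERNEL);
 *	int ref;
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	ref = xenbus_grant_ring_uvp(dev, virt_to_mfn((void *)ring));
 *	if (ref < 0)
 *		return ref;	(xenbus_dev_fatal_uvp has already run)
 *	(write "ref" to XenStore, e.g. with xenbus_printf_uvp)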
*/ int xenbus_grant_ring_uvp(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access_uvp(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal_uvp(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring_uvp); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn_uvp(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal_uvp(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn_uvp); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * Bind to an existing interdomain event channel in another domain. Returns 0 * on success and stores the local port in *port. On error, returns -errno, * switches the device to XenbusStateClosing, and saves the error in XenStore. */ int xenbus_bind_evtchn_uvp(struct xenbus_device *dev, int remote_port, int *port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = dev->otherend_id; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); if (err) xenbus_dev_fatal_uvp(dev, err, "binding to event channel %d from domain %d", remote_port, dev->otherend_id); else *port = bind_interdomain.local_port; return err; } EXPORT_SYMBOL_GPL(xenbus_bind_evtchn_uvp); #endif /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn_uvp(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error_uvp(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn_uvp); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * xenbus_map_ring_uvp_uvp_valloc_uvp * @dev: xenbus device * @gnt_ref: grant reference * @vaddr: pointer to address to be filled out by mapping * * Based on Rusty Russell's skeleton driver's map_page. * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp_valloc_uvp allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. 
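 *
 * Note that this block is compiled out here (see the #if 0 above); the
 * implementation actually built in this tree is the one in
 * xenbus_backend_client.c, which returns a struct vm_struct * instead.
 *
 * A minimal backend-side usage sketch against the prototype documented
 * here; gnt_ref is assumed to have been read from the peer's XenStore
 * entries, and error handling is abbreviated:
 *
 *	void *ring;
 *	int err = xenbus_map_ring_uvp_uvp_valloc_uvp(dev, gnt_ref, &ring);
 *	if (err)
 *		return err;	(the device is already switching to Closing)
 *	(... use the shared page at "ring" ...)
 *	xenbus_unmap_ring_uvp_uvp_vfree_uvp(dev, ring);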
*/ int xenbus_map_ring_uvp_uvp_valloc_uvp(struct xenbus_device *dev, int gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; struct vm_struct *area; *vaddr = NULL; area = xen_alloc_vm_area(PAGE_SIZE); if (!area) return -ENOMEM; op.host_addr = (unsigned long)area->addr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xen_free_vm_area(area); xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); return op.status; } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; *vaddr = area->addr; return 0; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp_valloc_uvp); /** * xenbus_map_ring_uvp_uvp * @dev: xenbus device * @gnt_ref: grant reference * @handle: pointer to grant handle to be filled * @vaddr: address to be mapped to * * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_uvp_uvp does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring_uvp_uvp(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op = { .host_addr = (unsigned long)vaddr, .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal_uvp(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring_uvp_uvp); /** * xenbus_unmap_ring_uvp_uvp_vfree_uvp * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_uvp_uvp_vfree_uvp if you mapped in your memory with * xenbus_map_ring_uvp_uvp_valloc_uvp (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp_vfree_uvp(struct xenbus_device *dev, void *vaddr) { struct vm_struct *area; struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) * method so that we don't have to muck with vmalloc internals here. * We could force the user to hang on to their struct vm_struct from * xenbus_map_ring_uvp_uvp_valloc_uvp, but these 6 lines considerably simplify * this API. 
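 * (In other words, the vm_struct for @vaddr is recovered by walking the
 * global vmlist under vmlist_lock, and the grant handle that the _valloc
 * variant stashed in area->phys_addr is then used for the unmap operation.)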
*/ read_lock(&vmlist_lock); for (area = vmlist; area != NULL; area = area->next) { if (area->addr == vaddr) break; } read_unlock(&vmlist_lock); if (!area) { xenbus_dev_error_uvp(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } op.handle = (grant_handle_t)area->phys_addr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) xen_free_vm_area(area); else xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp_vfree_uvp); /** * xenbus_unmap_ring_uvp_uvp * @dev: xenbus device * @handle: grant handle * @vaddr: addr to unmap * * Unmap a page of memory in this domain that was imported from another domain. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_uvp_uvp(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, .handle = handle, }; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error_uvp(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_uvp_uvp); #endif /** * xenbus_read_uvp_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_uvp_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather_uvp(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_uvp_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_comms.c000066400000000000000000000157561314037446600265530ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(struct work_struct *); static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. 
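 * Both indexes are snapshotted into locals and validated with
 * check_indexes() before anything is copied: they are written by the
 * other end of the ring, so an inconsistent pair resets the ring and
 * fails with -EIO rather than letting the memcpy below overrun the
 * response buffer.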
*/ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN_no) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler_uvp(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_comms.h000066400000000000000000000043301314037446600265420ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. 
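 * xb_write()/xb_read() move raw XenStore message bytes across the shared
 * ring, blocking interruptibly until space or data is available; both
 * return 0 on success or a negative errno (-EIO if the ring indexes are
 * found to be corrupt, or the error from an interrupted wait).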
*/ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_dev.c000066400000000000000000000254011314037446600261770ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include //#include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
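 * Completed replies and watch events are appended here as struct
 * read_buffer entries by queue_reply(); readers sleep on read_waitq and
 * the list itself is protected by reply_mutex.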
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); 
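/*
 * Hook the watch up to this file handle: once registered, watch_fired()
 * re-encodes every event as an XS_WATCH_EVENT message (path plus the
 * token supplied by userspace) onto u->read_buffers, where it is
 * returned to the process by xenbus_dev_read().
 */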
watch->dev_data = u; err = register_xenbus_watch_uvp(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch_uvp_uvp(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end_uvp(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch_uvp_uvp(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef HAVE_UNLOCKED_IOCTL static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { extern int xenbus_conn(domid_t remote_dom, int *grant_ref, evtchn_port_t *local_port); void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; 
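/* Copy-out to userspace failed: drop back to XENBUS_XSD_UNCOMMITTED so a
 * later IOCTL_XENBUS_ALLOC attempt can claim the xenstored state again. */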
atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, #ifdef HAVE_UNLOCKED_IOCTL .unlocked_ioctl = xenbus_dev_ioctl #endif }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry_uvp("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_probe.c000066400000000000000000001034601314037446600265320ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN_no) || defined(MODULE) #include #include #include #include #include #include #ifdef MODULE #include #endif #else #include #include #include #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL(xen_store_evtchn); #endif struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state); static int xenbus_dev_resume(struct device *dev); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch_uvp_uvp(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather_uvp(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal_uvp(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists_uvp(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal_uvp(xendev, -ENOENT, "unable to read other end from %s. 
" "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN_no) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif #if !defined(CONFIG_XEN_no) && !defined(MODULE) .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, #endif }, #if defined(CONFIG_XEN_no) || defined(MODULE) .dev = { #ifdef DEBIAN5 .bus_id = "xen", #else .init_name = "xen", #endif }, #endif }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_uvp_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate_uvp(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. 
*/ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed_uvp(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN_no) || defined(MODULE) return xenbus_watch_path_uvp2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else return xenbus_watch_path_uvpfmt(dev, &dev->otherend_watch, otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error_uvp(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state_uvp(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state_uvp(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate_uvp(dev->state)); goto out; } xenbus_switch_state_uvp(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common2(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = owner; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) drv->driver.mod_name = mod_name; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name) 
{ int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common2(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; // If this driver is loaded as a module wait for devices to attach. wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend_uvp); void xenbus_unregister_driver_uvp(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver_uvp); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_uvp_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
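 * (Both strings were accounted for in the single kzalloc() above, so the
 * nodename and devicetype of the new xenbus_device sit directly behind
 * the structure and are released with it by xenbus_dev_release().)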
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN_no) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory_uvp(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory_uvp(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed_uvp(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists_uvp(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... 
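 * (i.e. device/<type>/<id> for the frontend bus, or backend/<type>/<domid>/<devid> for the backend bus)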
*/ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL_GPL(xenbus_dev_changed_uvp); #endif static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed_uvp(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state) #else static int suspend_dev(struct device *dev, void *data) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) #if !defined(CONFIG_XEN_no) && !defined(MODULE) err = drv->suspend(xdev, state); #else err = drv->suspend(xdev); #endif if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } #if defined(CONFIG_XEN_no) || defined(MODULE) static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } #endif #if !defined(CONFIG_XEN_no) && !defined(MODULE) static int xenbus_dev_resume(struct device *dev) #else static int resume_dev(struct device *dev, void *data) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev_name(dev), err); return err; } return 0; } #if defined(CONFIG_XEN_no) || defined(MODULE) void xenbus_suspend_uvp(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend_uvp); void xen_unplug_emulated_devices(void); void xenbus_resume_uvp(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume_uvp); void xenbus_suspend_uvp_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); 
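 /* Backend devices need suspend_cancel as well; xenbus_backend_resume() is only a bus_for_each_dev() wrapper, so reuse it with suspend_cancel_dev. */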
xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_uvp_cancel); #endif /* A flag to determine if xenstored is 'ready' (i.e. has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int register_xenstore_notifier_uvp(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier_uvp); void unregister_xenstore_notifier_uvp_uvp(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier_uvp_uvp); void xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch_uvp(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #if defined(CONFIG_XEN_no) || defined(MODULE) static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; close.port = port; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) remove_xen_proc_entry_uvp("xsd_kva"); remove_xen_proc_entry_uvp("xsd_port"); #endif rc = xb_free_port(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access_uvp(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = xb_free_port(xen_store_evtchn); if (rc2 != 0) printk(KERN_WARNING "XENBUS: Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return 
rc; } #endif #ifndef MODULE static int __init xenbus_probe_init(void) #else static int __devinit xenbus_probe_init(void) #endif { int err = 0; #if defined(CONFIG_XEN_no) || defined(MODULE) unsigned long page = 0; #endif DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { #if defined(CONFIG_XEN_no) || defined(MODULE) struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry_uvp("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry_uvp("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif #else /* dom0 not yet supported */ #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN_no) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN_no) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } #endif xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #if defined(CONFIG_XEN_COMPAT_XENFS_uvp) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: #if defined(CONFIG_XEN_no) || defined(MODULE) if (page) free_page(page); #endif /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
*/ return err; } #ifndef MODULE postcore_initcall(xenbus_probe_init); #ifdef CONFIG_XEN_no MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #else int __devinit xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_uvp_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 150 - seconds_waited); if (seconds_waited == 150) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("seconds_waited\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend_uvp(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_probe.h000066400000000000000000000071771314037446600265470ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; #if defined(CONFIG_XEN_no) || defined(MODULE) struct device dev; #endif }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name); int __xenbus_register_frontend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed_uvp(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_probe_backend.c000066400000000000000000000170001314037446600301730ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather_uvp(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists_uvp(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static struct device_attribute xenbus_backend_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, .dev_attrs = xenbus_backend_attrs, }, .dev = { .init_name = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } int __xenbus_register_backend_uvp(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend, owner, mod_name); } EXPORT_SYMBOL_GPL(__xenbus_register_backend_uvp); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = 
kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory_uvp(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed_uvp(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch_uvp(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend_uvp(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend_uvp); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-platform-pci/xenbus_xs.c000066400000000000000000000541231314037446600260560ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
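 * (read_reply() sleeps on reply_waitq until process_msg(), run from the xenbus thread, queues the response on reply_list.)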
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN_no) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; /* XS_LINUX_WEAK_WRITE must correspond to the xsd_sockmsg_type definitions in xs_wire.h */ #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed.
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory_uvp); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory_uvp(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists_uvp); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read_uvp(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read_uvp); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write_uvp); /* Create a new directory. */ int xenbus_mkdir_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir_uvp); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm_uvp(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm_uvp); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
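 * Each successful start must be paired with xenbus_transaction_end_uvp().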
*/ int xenbus_transaction_start_uvp(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start_uvp); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end_uvp(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end_uvp); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read_uvp(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf_uvp); /* Single printf and write: returns -errno or 0. */ int xenbus_printf_uvp(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write_uvp(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf_uvp); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather_uvp(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read_uvp(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather_uvp); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch_uvp(struct xenbus_watch *watch) { /* Pointer in ascii is the token. 
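 * (The watch pointer, printed in hex, is registered as the xenstore token; find_watch() maps it back with simple_strtoul(token, NULL, 16).)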
*/ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch_uvp); void unregister_xenbus_watch_uvp_uvp(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN_no) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch_uvp_uvp); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN_no) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #if defined(CONFIG_XEN_no) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN_no) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. 
kthread_run can block which can deadlock * against unregister_xenbus_watch_uvp_uvp() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/000077500000000000000000000000001314037446600222335ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/Kbuild000066400000000000000000000001241314037446600233650ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-scsi.o xen-scsi-objs := scsifront.o xenbus.o 
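# Included from the wrapper Makefile when the kernel build system re-enters with KERNELRELEASE set; a typical out-of-tree build (assuming a standard kernel build tree) is: make -C /lib/modules/$(uname -r)/build M=$(pwd) modules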
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/Makefile000066400000000000000000000000661314037446600236750ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/common.h000066400000000000000000000101141314037446600236710ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/scsifront.c000066400000000000000000000265311314037446600244200ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free > VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = 0x0fff; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].next_free = info->shadow_free; info->shadow[id].req_scsi_cmnd = 0; info->shadow_free = id; spin_unlock_irqrestore(&info->shadow_lock, flags); } struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq_uvp(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id) { int i; if (s->sc_data_direction == DMA_NONE) return; if (s->nr_segments) { for (i = 0; i < s->nr_segments; i++) { if (unlikely(gnttab_query_foreign_access_uvp( s->gref[i]) != 0)) { printk(KERN_ALERT "scsifront: " "grant still in use by backend.\n"); BUG(); } gnttab_end_foreign_access_uvp(s->gref[i], 0UL); } } return; } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd; if (sc == NULL) BUG(); scsifront_gnttab_done(&info->shadow[id], id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; info->shadow[id].rslt_reset = ring_res->rslt; spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } int scsifront_cmd_done(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; unsigned long flags; spin_lock_irqsave(&info->io_lock, flags); rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, 
more_to_do); } else { info->ring.sring->rsp_event = i + 1; } spin_unlock_irqrestore(&info->io_lock, flags); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id) { grant_ref_t gref_head; struct page *page; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned long buffer_pfn; if (sc->sc_data_direction == DMA_NONE) return 0; err = gnttab_alloc_grant_references_uvp(VSCSIIF_SG_TABLESIZE, &gref_head); if (err) { printk(KERN_ERR "scsifront: gnttab_alloc_grant_references_uvp() error\n"); return -ENOMEM; } if (scsi_bufflen(sc)) { /* quoted scsi_lib.c/scsi_req_map_sg . */ struct scatterlist *sg, *sgl = scsi_sglist(sc); unsigned int data_len = scsi_bufflen(sc); nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } for_each_sg (sgl, sg, scsi_sg_count(sc), i) { page = sg_page(sg); off = sg->offset; len = sg->length; buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); ref = gnttab_claim_grant_reference_uvp(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_uvp_ref(ref, info->dev->otherend_id, buffer_pfn, write); info->shadow[id].gref[ref_cnt] = ref; ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; ring_req->seg[ref_cnt].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } big_to_sg: gnttab_free_grant_reference_uvps(gref_head); return ref_cnt; } static int scsifront_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; int ref_cnt; uint16_t rqid; if (RING_FULL(&info->ring)) { goto out_host_busy; } sc->scsi_done = done; sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].act = ring_req->act; ref_cnt = map_data_for_request(info, sc, ring_req, rqid); if (ref_cnt < 0) { add_id_to_freelist(info, rqid); if (ref_cnt == (-ENOMEM)) goto out_host_busy; else { sc->result = (DID_ERROR << 16); goto out_fail_command; } } ring_req->nr_segments = (uint8_t)ref_cnt; 
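	/*
	 * Keep the segment count in the shadow entry as well, so that
	 * scsifront_gnttab_done() can release exactly this many grant
	 * references when the response for this request id arrives.
	 */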
info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); return 0; out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(sc); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; uint16_t rqid; int err; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; ring_req->channel = sc->device->channel; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->cmd_len = sc->cmd_len; if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); ring_req->nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-scsi/xenbus.c000066400000000000000000000242051314037446600237060ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif extern struct scsi_host_template scsifront_sht; static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access_uvp(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler_uvp(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal_uvp(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal_uvp(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal_uvp(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); } err = xenbus_printf_uvp(xbt, dev->nodename, "ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal_uvp(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf_uvp(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal_uvp(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end_uvp(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = 
scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal_uvp(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state_uvp(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
*/ scsi_remove_host(host); xenbus_frontend_closed_uvp(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory_uvp(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf_uvp(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_uvp_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state_uvp(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state_uvp(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state_uvp(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver_uvp(&scsifront_driver); } 
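/*
 * scsifront_xenbus_init() and scsifront_xenbus_unregister() are the entry
 * points used by the module init/exit code in scsifront.c; everything else
 * in this file is reached through the xenbus_driver callbacks registered
 * above (probe, remove, otherend_changed).
 */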
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/000077500000000000000000000000001314037446600220455ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/Kbuild000066400000000000000000000001231314037446600231760ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o vcd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/Makefile000066400000000000000000000000661314037446600235070ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/blkfront.c000066400000000000000000001450761314037446600240470ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #if (defined SUSE_2629) || (defined DEBIAN5) #include "md.h" #endif #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *, struct blkfront_info *, struct blkif_response *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write_uvp(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err = 0, vdevice = 0; struct blkfront_info *info; enum xenbus_state backend_state; /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf_uvp(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal_uvp(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; /* for persistent grant */ info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_uvp_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. 
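	 * The frontend therefore enters Initialising below and only calls
	 * talk_to_backend() once the backend has already reached InitWait;
	 * otherwise the handshake is completed later from backend_changed().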
*/ xenbus_switch_state_uvp(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); /*info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL); if (info->sg == NULL) goto out_of_memory; memset(info->sg, 0, sizeof(info->sg));*/ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } return 0; out_of_memory: //kfree(info->sg); //info->sg = NULL; for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; 
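		/* Give back any indirect pages allocated before the failure. */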
list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. * This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio, split_bio->err); kfree(split_bio); } bio_put(bio); } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference_uvp(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_uvp_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err; enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_uvp_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). 
*/ xenbus_switch_state_uvp(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) printk("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf_uvp(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) printk("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf_uvp(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) { goto abort_transaction; } } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf_uvp(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf_uvp(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf_uvp(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf_uvp(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) { goto abort_transaction; } what = "protocol"; err = xenbus_printf_uvp(xbt, dev->nodename, what, "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { goto abort_transaction; } /* for persistent grant */ err = xenbus_printf_uvp(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state_uvp(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: 
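	/* Abort the transaction; 'what' names the xenstore key whose write failed. */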
xenbus_transaction_end_uvp(xbt, 1); if (what) xenbus_dev_fatal_uvp(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; int i; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal_uvp(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); //sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); for (i=0; iring_size; i++) { err = xenbus_grant_ring_uvp(dev, pfn_to_mfn(page_to_pfn(info->ring_pages[i]))); if (err < 0) goto fail; info->ring_refs[i] = err; } err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal_uvp(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read_uvp(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write_uvp(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal_uvp(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error_uvp(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read_uvp(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. 
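 *
 * InitWait starts the ring and xenstore setup via talk_to_backend(),
 * Connected completes device initialisation in connect(), and Closing
 * hands the teardown off to the xenwatch_unplugdisk kthread.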
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; /* for persistent grant */ int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf_uvp(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); #ifndef DEBIAN5 revalidate_disk(info->gd); #endif return; /* fall through */ case BLKIF_STATE_SUSPENDED: blkif_recover(info); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xenbus_gather_uvp(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal_uvp(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state_uvp(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk("%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. 
*/ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback_uvp(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if(dev) xenbus_frontend_closed_uvp(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq_uvp(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; info->users++; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_uvp_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } #endif return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = bd->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; struct blkif_request_segment_aligned *segments = NULL; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; bool new_persistent_gnts; grant_ref_t gref_head; struct scatterlist *sg; struct grant *gnt_list_entry = NULL; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references_uvp( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback_uvp( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. 
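 * The slot at req_prod_pvt is claimed here, and the id taken from the free
 * list ties the request to its shadow entry until the response is handled.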
*/ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->cmd_flags & (REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; #if (defined SUSE_2629) || (defined DEBIAN5) ring_req->u.indirect.sector_number = (blkif_sector_t)req->sector; #else ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); #endif ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { #if (defined SUSE_2629) || (defined DEBIAN5) ring_req->u.rw.sector_number = (blkif_sector_t)req->sector; #else ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); #endif ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; ring_req->u.rw.nr_segments = nseg; } for_each_sg(info->shadow[id].sg, sg, nseg, i) { fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_USER0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. 
It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } if (segments) kunmap_atomic(segments, KM_USER0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_reference_uvps(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ #if (defined SUSE_2629) || (defined DEBIAN5) void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req) && !blk_pc_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } #else void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if (!blk_fs_request(req) && !blk_pc_request(req)) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } #endif static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ADD_ID_TO_FREELIST(info, id); ret = bret->status == BLKIF_RSP_OKAY ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); #if (defined SUSE_2629) || (defined DEBIAN5) ret = __blk_end_request(req, ret, blk_rq_bytes(req)); BUG_ON(ret); #else __blk_end_request_all(req, ret); #endif break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access_uvp(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access_uvp(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access_uvp(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback_uvp(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. 
*/ flush_scheduled_work(); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; for (i = 0; i < info->ring_size; i++) { if (info->ring_refs[i] != GRANT_INVALID_REF) { //gnttab_end_foreign_access_uvp(info->ring_refs[i], // (unsigned long)mfn_to_virt(pfn_to_mfn(page_to_pfn(info->ring_pages[i])))); gnttab_end_foreign_access_uvp(info->ring_refs[i],0UL); if(info->ring_pages[i]) { __free_page(info->ring_pages[i]); info->ring_pages[i] = NULL; } info->ring_refs[i] = GRANT_INVALID_REF; } } if (info->irq) unbind_from_irqhandler_uvp(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ for_each_sg(s->sg, sg, nseg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access_uvp(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access_uvp(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access_uvp(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access_uvp(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. 
*/ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static int blkif_recover(struct blkfront_info *info) { int i; // struct blkif_request *req; struct request *req, *n; struct blk_shadow *copy; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int segs, offset; int pending, size; struct split_bio *split_bio; struct list_head requests; //printk("[INTO]----->blkif_recover\n"); /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); if (!copy) return -ENOMEM; memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { kfree(copy); return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; #if (defined UBUNTU_1004) || (defined UBUNTU_9) || (defined SUSE_2629) || (defined DEBIAN5) || (defined DEBIAN6_32) blk_queue_max_phys_segments(info->rq, segs); blk_queue_max_hw_segments(info->rq, segs); #else blk_queue_max_segments(info->rq, segs); #endif bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < RING_SIZE(&info->ring); i++) { /* Not in use? */ if (!copy[i].request) continue; #if 0 /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->u.rw.id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->u.rw.id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->u.rw.nr_segments; j++) gnttab_grant_foreign_access_uvp_ref( req->u.rw.seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->u.rw.id].grants_used[j]->pfn), rq_data_dir( (struct request *) info->shadow[req->u.rw.id].request)); info->shadow[req->u.rw.id].req = *req; info->ring.req_prod_pvt++; #endif merge_bio.head = copy[i].request->bio; merge_bio.tail = copy[i].request->biotail; bio_list_merge(&bio_list, &merge_bio); copy[i].request->bio = NULL; blk_put_request(copy[i].request); } kfree(copy); /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. 
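 * The bios are collected on bio_list and resubmitted further down; any bio with more segments than the new limit is split first. Roughly, a bio spanning 256 pages with segs == 32 ends up as 8 cloned bios.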
*/ spin_lock_irq(&blkif_io_lock); #if (defined SUSE_2629) || (defined DEBIAN5) while ((req = elv_next_request(info->rq)) != NULL) { blkdev_dequeue_request(req); #else while ((req = blk_fetch_request(info->rq)) != NULL) { #endif //printk("[blkif_recover] while req:%p\n", req); merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->cmd_flags & (REQ_FUA)) printk("diskcache flush request found!\n"); __blk_put_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); (void)xenbus_switch_state_uvp(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ //flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. */ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver_uvp(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/block.h000066400000000000000000000150661314037446600233200ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 256 (1M))"); //#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. 
We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; /* move sg to blk_shadow struct */ //struct scatterlist *sg; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned long shadow_free; int feature_barrier; unsigned int max_indirect_segments; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/dm-bio-list.h000066400000000000000000000020461314037446600243400ustar00rootroot00000000000000/* * Copyright (C) 2004 Red Hat UK Ltd. * * This file is released under the GPL. 
*/ #ifndef DM_BIO_LIST_H #define DM_BIO_LIST_H #include struct bio_list { struct bio *head; struct bio *tail; }; static inline void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; } static inline void bio_list_add(struct bio_list *bl, struct bio *bio) { bio->bi_next = NULL; if (bl->tail) bl->tail->bi_next = bio; else bl->head = bio; bl->tail = bio; } static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->tail) bl->tail->bi_next = bl2->head; else bl->head = bl2->head; bl->tail = bl2->tail; } static inline struct bio *bio_list_pop(struct bio_list *bl) { struct bio *bio = bl->head; if (bio) { bl->head = bl->head->bi_next; if (!bl->head) bl->tail = NULL; bio->bi_next = NULL; } return bio; } static inline struct bio *bio_list_get(struct bio_list *bl) { struct bio *bio = bl->head; bl->head = bl->tail = NULL; return bio; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/md.h000066400000000000000000000066451314037446600226310ustar00rootroot00000000000000/* md.h : Multiple Devices driver for Linux Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman Copyright (C) 1994-96 Marc ZYNGIER or This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _MD_H #define _MD_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * 'md_p.h' holds the 'physical' layout of RAID devices * 'md_u.h' holds the user <=> kernel API * * 'md_k.h' holds kernel internal definitions */ #include #include #include "md_k.h" #ifdef CONFIG_MD /* * Different major versions are not compatible. * Different minor versions are only downward compatible. * Different patchlevel versions are downward and upward compatible. */ #define MD_MAJOR_VERSION 0 #define MD_MINOR_VERSION 90 /* * MD_PATCHLEVEL_VERSION indicates kernel functionality. 
* >=1 means different superblock formats are selectable using SET_ARRAY_INFO * and major_version/minor_version accordingly * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT * in the super status byte * >=3 means that bitmap superblock version 4 is supported, which uses * little-ending representation rather than host-endian */ #define MD_PATCHLEVEL_VERSION 3 extern int mdp_major; extern int register_md_personality (struct mdk_personality *p); extern int unregister_md_personality (struct mdk_personality *p); extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), mddev_t *mddev, const char *name); extern void md_unregister_thread (mdk_thread_t *thread); extern void md_wakeup_thread(mdk_thread_t *thread); extern void md_check_recovery(mddev_t *mddev); extern void md_write_start(mddev_t *mddev, struct bio *bi); extern void md_write_end(mddev_t *mddev); extern void md_handle_safemode(mddev_t *mddev); extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev); extern void md_unplug_mddev(mddev_t *mddev); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct page *page, int rw); extern void md_do_sync(mddev_t *mddev); extern void md_new_event(mddev_t *mddev); extern void md_allow_write(mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); #endif /* CONFIG_MD */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/md_k.h000066400000000000000000000261671314037446600231440ustar00rootroot00000000000000/* md_k.h : kernel internal structure of the Linux MD driver Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. You should have received a copy of the GNU General Public License (for example /usr/src/linux/COPYING); if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef _MD_K_H #define _MD_K_H /* and dm-bio-list.h is not under include/linux because.... ??? */ #include "dm-bio-list.h" #ifdef CONFIG_BLOCK #define LEVEL_MULTIPATH (-4) #define LEVEL_LINEAR (-1) #define LEVEL_FAULTY (-5) /* we need a value for 'no level specified' and 0 * means 'raid0', so we need something else. 
This is * for internal use only */ #define LEVEL_NONE (-1000000) #define MaxSector (~(sector_t)0) typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; /* * options passed in raidrun: */ /* Currently this must fit in an 'int' */ #define MAX_CHUNK_SIZE (1<<30) /* * MD's 'extended' device */ struct mdk_rdev_s { struct list_head same_set; /* RAID devices within the same set */ sector_t size; /* Device size (in blocks) */ mddev_t *mddev; /* RAID array if running */ long last_events; /* IO event timestamp */ struct block_device *bdev; /* block device handle */ struct page *sb_page; int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ sector_t sb_offset; int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ struct kobject kobj; /* A device can be in one of three states based on two flags: * Not working: faulty==1 in_sync==0 * Fully working: faulty==0 in_sync==1 * Working, but not * in sync with array * faulty==0 in_sync==0 * * It can never have faulty==1, in_sync==1 * This reduces the burden of testing multiple flags in many cases */ unsigned long flags; #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ #define AllReserved 6 /* If whole device is reserved for * one array */ #define AutoDetected 7 /* added by auto-detect */ #define Blocked 8 /* An error occured on an externally * managed array, don't allow writes * until it is cleared */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ int saved_raid_disk; /* role that device used to have in the * array and could again if we did a partial * resync from the bitmap */ sector_t recovery_offset;/* If this device has been partially * recovered, this is where we were * up to. */ atomic_t nr_pending; /* number of pending requests. * only maintained for arrays that * support hot removal */ atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ atomic_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. */ struct work_struct del_work; /* used for delayed sysfs removal */ }; struct mddev_s { void *private; struct mdk_personality *pers; dev_t unit; int md_minor; struct list_head disks; unsigned long flags; #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ int ro; struct gendisk *gendisk; struct kobject kobj; /* Superblock information */ int major_version, minor_version, patch_version; int persistent; int external; /* metadata is * managed externally */ char metadata_type[17]; /* externally set*/ int chunk_size; time_t ctime, utime; int level, layout; char clevel[16]; int raid_disks; int max_disks; sector_t size; /* used size of component devices */ sector_t array_size; /* exported array size */ __u64 events; char uuid[16]; /* If the array is being reshaped, we need to record the * new shape and an indication of where we are up to. * This is written to the superblock. * If reshape_position is MaxSector, then no reshape is happening (yet). 
*/ sector_t reshape_position; int delta_disks, new_level, new_layout, new_chunk; struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ unsigned long resync_mark; /* a recent timestamp */ sector_t resync_mark_cnt;/* blocks written at resync_mark */ sector_t curr_mark_cnt; /* blocks scheduled now */ sector_t resync_max_sectors; /* may be set by personality */ sector_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ /* allow user-space to request suspension of IO to regions of the array */ sector_t suspend_lo; sector_t suspend_hi; /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; /* resync even though the same disks are shared among md-devices */ int parallel_resync; int ok_start_degraded; /* recovery/resync flags * NEEDED: we might need to start a resync/recover * RUNNING: a thread is running, or about to be started * SYNC: actually doing a resync, not a recovery * INTR: resync needs to be aborted for some reason * DONE: thread is done and is waiting to be reaped * REQUEST: user-space has requested a sync (used with SYNC) * CHECK: user-space request for for check-only, no repair * RESHAPE: A reshape is happening * * If neither SYNC or RESHAPE are set, then it is a recovery. */ #define MD_RECOVERY_RUNNING 0 #define MD_RECOVERY_SYNC 1 #define MD_RECOVERY_INTR 3 #define MD_RECOVERY_DONE 4 #define MD_RECOVERY_NEEDED 5 #define MD_RECOVERY_REQUESTED 6 #define MD_RECOVERY_CHECK 7 #define MD_RECOVERY_RESHAPE 8 #define MD_RECOVERY_FROZEN 9 unsigned long recovery; int in_sync; /* know to not need resync */ struct mutex reconfig_mutex; atomic_t active; int changed; /* true if we might need to reread partition info */ int degraded; /* whether md should consider * adding a spare */ int barriers_work; /* initialised to true, cleared as soon * as a barrier request to slave * fails. Only supported */ struct bio *biolist; /* bios that need to be retried * because BIO_RW_BARRIER is not supported */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t recovery_cp; sector_t resync_max; /* resync should pause * when it gets here */ spinlock_t write_lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ atomic_t pending_writes; /* number of active superblock writes */ unsigned int safemode; /* if set, update "clean" superblock * when no writes pending. */ unsigned int safemode_delay; struct timer_list safemode_timer; atomic_t writes_pending; struct request_queue *queue; /* for plugging ... */ atomic_t write_behind; /* outstanding async IO */ unsigned int max_write_behind; /* 0 = sync */ struct bitmap *bitmap; /* the bitmap for the device */ struct file *bitmap_file; /* the bitmap file */ long bitmap_offset; /* offset from superblock of * start of bitmap. May be * negative, but not '0' */ long default_bitmap_offset; /* this is the offset to use when * hot-adding a bitmap. It should * eventually be settable by sysfs. 
*/ struct list_head all_mddevs; }; static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) { int faulty = test_bit(Faulty, &rdev->flags); if (atomic_dec_and_test(&rdev->nr_pending) && faulty) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } struct mdk_personality { char *name; int level; struct list_head list; struct module *owner; int (*make_request)(struct request_queue *q, struct bio *bio); int (*run)(mddev_t *mddev); int (*stop)(mddev_t *mddev); void (*status)(struct seq_file *seq, mddev_t *mddev); /* error_handler must set ->faulty and clear ->in_sync * if appropriate, and should abort recovery if needed */ void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); int (*hot_remove_disk) (mddev_t *mddev, int number); int (*spare_active) (mddev_t *mddev); sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); int (*resize) (mddev_t *mddev, sector_t sectors); int (*check_reshape) (mddev_t *mddev); int (*start_reshape) (mddev_t *mddev); int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); /* quiesce moves between quiescence states * 0 - fully active * 1 - no new requests allowed * others - reserved */ void (*quiesce) (mddev_t *mddev, int state); }; struct md_sysfs_entry { struct attribute attr; ssize_t (*show)(mddev_t *, char *); ssize_t (*store)(mddev_t *, const char *, size_t); }; static inline char * mdname (mddev_t * mddev) { return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; } /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. */ #define rdev_for_each_list(rdev, tmp, list) \ \ for ((tmp) = (list).next; \ (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ (tmp) = (tmp)->next, (tmp)->prev != &(list) \ ; ) /* * iterates through the 'same array disks' ringlist */ #define rdev_for_each(rdev, tmp, mddev) \ rdev_for_each_list(rdev, tmp, (mddev)->disks) typedef struct mdk_thread_s { void (*run) (mddev_t *mddev); mddev_t *mddev; wait_queue_head_t wqueue; unsigned long flags; struct task_struct *tsk; unsigned long timeout; } mdk_thread_t; #define THREAD_WAKEUP 0 #define __wait_event_lock_irq(wq, condition, lock, cmd) \ do { \ wait_queue_t __wait; \ init_waitqueue_entry(&__wait, current); \ \ add_wait_queue(&wq, &__wait); \ for (;;) { \ set_current_state(TASK_UNINTERRUPTIBLE); \ if (condition) \ break; \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock); \ } \ current->state = TASK_RUNNING; \ remove_wait_queue(&wq, &__wait); \ } while (0) #define wait_event_lock_irq(wq, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq, condition, lock, cmd); \ } while (0) static inline void safe_put_page(struct page *p) { if (p) put_page(p); } #endif /* CONFIG_BLOCK */ #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/vbd.c000066400000000000000000000304741314037446600227740ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) /*llj: the major of vitual_device_ext */ #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... 
SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, unsigned int segments) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ #if (defined SUSE_2629) || (defined DEBIAN5) blk_queue_hardsect_size(rq, sector_size); #else blk_queue_logical_block_size(rq, sector_size); #endif #ifdef UBUNTU #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) blk_queue_max_hw_sectors(rq, segments*8); #else blk_queue_max_sectors(rq, segments*8); #endif #else #if (defined SUSE_2629) || (defined DEBIAN5) blk_queue_max_sectors(rq, segments*8); #else blk_queue_max_hw_sectors(rq, segments*8); #endif #endif /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ #ifdef UBUNTU #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) blk_queue_max_segments(rq, segments); #else blk_queue_max_phys_segments(rq, segments); blk_queue_max_hw_segments(rq, segments); #endif #else #if (defined SUSE_2629) || (defined DEBIAN5) || (defined DEBIAN6_32) blk_queue_max_phys_segments(rq, segments); blk_queue_max_hw_segments(rq, segments); #else blk_queue_max_segments(rq, segments); #endif #endif /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
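 * (BLK_BOUNCE_ANY tells the block layer that every page, including highmem, may be handed to the driver directly, so no bounce copies are made.)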
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { // this is above the extended range; something is wrong printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); printk("Set blk queue %d\n", info->max_indirect_segments ? :BLKIF_MAX_SEGMENTS_PER_REQUEST); if (xlvbd_init_blk_queue(gd, sector_size, info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? 
"enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vbd/vcd.c000066400000000000000000000315321314037446600227710ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define REVISION "$Revision: 1.0 $" #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif #if (defined SUSE_2629) || (defined DEBIAN5) req->sector = 0; req->nr_sectors = 1; #else req->__sector = 0; req->__data_len = PAGE_SIZE; #endif req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; size_t size; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } size = PAGE_SIZE; memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) { memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); } if (cgc->buffer && cgc->buflen) { memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); } __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { 
ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { int ret; struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { int ret = 0; struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { int ret = -EIO; struct blkfront_info *info; info = cdi->disk->private_data; ret = submit_cdrom_cmd(info, cgc); cgc->stat = ret; return ret; } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif info->users = vcd->vcd_cdrom_info.use_count; spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static 
int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { info->users = 1; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: ret = submit_cdrom_cmd(info, (struct packet_command *)arg); break; default: /* Not supported, augment supported above if necessary */ printk("%s():%d Unsupported IOCTL:%x \n", __func__, __LINE__, cmd); ret = -ENOTTY; break; } spin_unlock(&vcd->vcd_cdrom_info_lock); out: return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; int ret = 0; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); if (ret_vcd) { ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); } return ret; } static struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) { goto out; } /* Create new vcd_disk and fill in cdrom_info */ vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); if (!vcd) { printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } xencdrom_bdops.owner = gd->fops->owner; gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; 
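/* Publish the new entry on the global vcd_disks list so the block open/release/ioctl wrappers above can find it again by gendisk. */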
spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/000077500000000000000000000000001314037446600222345ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/Kbuild000066400000000000000000000001411314037446600233650ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/Makefile000066400000000000000000000000661314037446600236760ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/accel.c000066400000000000000000000534441314037446600234610ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. 
Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read_uvp(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded_uvp() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path_uvp2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch_uvp_uvp(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
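 * The data path is quiesced first (napi_disable() plus netif_tx_lock_bh()) so the hook pointer is never swapped while packets are being processed.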
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
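 *
 * Illustrative sketch of the plugin side (the my_* names are hypothetical;
 * the hook members shown are the ones used elsewhere in this file):
 *
 *   static struct netfront_accel_hooks my_hooks = {
 *       .new_device    = my_new_device,
 *       .remove        = my_remove,
 *       .check_ready   = my_check_ready,
 *       .get_stats     = my_get_stats,
 *       .stop_napi_irq = my_stop_napi_irq,
 *   };
 *
 *   rc = netfront_accelerator_loaded_uvp(NETFRONT_ACCEL_VERSION,
 *                                        "my-accel-frontend", &my_hooks);
 *
 * A non-zero return indicates a version mismatch; see the checks at the
 * top of the function.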
*/ int netfront_accelerator_loaded_uvp(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded_uvp); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
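 *
 * A plugin would typically call this from its module exit path, before any
 * of its hook functions can disappear, e.g. (illustrative only, my_* names
 * hypothetical):
 *
 *   static void __exit my_accel_exit(void)
 *   {
 *       netfront_accelerator_stop_uvp("my-accel-frontend");
 *   }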
*/ void netfront_accelerator_stop_uvp(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop_uvp); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
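 * (Lifecycle note summarising the surrounding handlers: suspend calls
 * do_remove() but keeps the vif_state on the accelerator's list so that
 * suspend_cancel can simply re-fire the accel watch; resume, below, drops
 * the vif_state from the list so that a later xenbus watch event re-runs
 * the normal probe path.)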
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/acpi_bus.h000066400000000000000000000261731314037446600242030ustar00rootroot00000000000000/* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ #include #include /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; acpi_handle handles[ACPI_MAX_HANDLES]; }; /* acpi_utils.h */ acpi_status acpi_extract_package(union acpi_object *package, struct acpi_buffer *format, struct acpi_buffer *buffer); acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); acpi_status acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); #ifdef CONFIG_ACPI #include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; enum acpi_bus_removal_type { ACPI_BUS_REMOVAL_NORMAL = 0, ACPI_BUS_REMOVAL_EJECT, ACPI_BUS_REMOVAL_SUPRISE, ACPI_BUS_REMOVAL_TYPE_COUNT }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT }; struct acpi_driver; struct acpi_device; /* * ACPI Driver * ----------- */ typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); struct acpi_bus_ops { u32 acpi_op_add:1; u32 acpi_op_start:1; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_notify notify; }; #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ struct acpi_driver { char name[80]; char class[80]; const struct acpi_device_id *ids; /* Supported Hardware IDs */ unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; }; /* * ACPI Device * ----------- */ /* Status (_STA) */ struct acpi_device_status { u32 present:1; u32 enabled:1; u32 show_in_ui:1; u32 functional:1; u32 battery_present:1; u32 reserved:27; }; /* Flags */ struct acpi_device_flags { u32 dynamic_status:1; u32 bus_address:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? 
*/ u32 force_power_state:1; u32 reserved:22; }; /* File System */ struct acpi_device_dir { struct proc_dir_entry *entry; }; #define acpi_device_dir(d) ((d)->dir.entry) /* Plug and Play */ typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_hardware_id { struct list_head list; char *id; }; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ struct list_head ids; /* _HID and _CIDs */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) char *acpi_device_hid(struct acpi_device *device); #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) /* Power Management */ struct acpi_device_power_flags { u32 explicit_get:1; /* _PSC present? */ u32 power_resources:1; /* Power resources */ u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 reserved:28; }; struct acpi_device_power_state { struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ u8 reserved:6; } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ struct acpi_handle_list resources; /* Power resources referenced */ }; struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ }; /* Performance Management */ struct acpi_device_perf_flags { u8 reserved:8; }; struct acpi_device_perf_state { struct { u8 valid:1; u8 reserved:7; } flags; u8 power; /* % Power (compared to P0) */ u8 performance; /* % Performance ( " ) */ int latency; /* Px->P0 time (microseconds) */ }; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ }; struct acpi_device_wakeup_state { u8 enabled:1; }; struct acpi_device_wakeup { acpi_handle gpe_device; acpi_integer gpe_number; acpi_integer sleep_state; struct acpi_handle_list resources; struct acpi_device_wakeup_state state; struct acpi_device_wakeup_flags flags; int prepare_count; }; /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_ops ops; struct acpi_driver *driver; void *driver_data; struct device dev; struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */ enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ }; static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; } #define to_acpi_device(d) container_of(d, struct acpi_device, dev) #define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; /* * Events * ------ */ struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); extern int register_acpi_bus_notifier(struct notifier_block *nb); extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); void acpi_bus_data_handler(acpi_handle handle, void *context); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_receive_event(struct acpi_bus_event *event); #else static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) { return 0; } #endif int acpi_bus_register_driver(struct acpi_driver *driver); void acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ 
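/*
 * Illustrative sketch of the registration below (the my_* names are
 * hypothetical and only show the shape of the structure):
 *
 *   static struct acpi_bus_type my_acpi_bus = {
 *       .bus         = &my_bus_type,
 *       .find_device = my_find_device,   (maps a struct device to its handle)
 *   };
 *   register_acpi_bus_type(&my_acpi_bus);
 *
 * The find_bridge callback is only needed for bridge-like devices such as
 * PCI root bridges and IDE controllers, as the field comments note.
 */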
struct acpi_bus_type { struct list_head list; struct bus_type *bus; /* For general devices under the bus */ int (*find_device) (struct device *, acpi_handle *); /* For bridges, such as PCI root bridge, IDE controller */ int (*find_bridge) (struct device *, acpi_handle *); }; int register_acpi_bus_type(struct acpi_bus_type *); int unregister_acpi_bus_type(struct acpi_bus_type *); struct device *acpi_get_physical_device(acpi_handle); struct acpi_pci_root { struct list_head node; struct acpi_device * device; struct acpi_pci_id id; struct pci_bus *bus; u16 segment; u8 bus_nr; u32 osc_support_set; /* _OSC state of support bits */ u32 osc_control_set; /* _OSC state of control bits */ u32 osc_control_qry; /* the latest _OSC query result */ u32 osc_queried:1; /* has _OSC control been queried? */ }; /* helper */ acpi_handle acpi_get_child(acpi_handle, acpi_integer); int acpi_is_root_bridge(acpi_handle); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) #ifdef CONFIG_PM_SLEEP int acpi_pm_device_sleep_state(struct device *, int *); int acpi_pm_device_sleep_wake(struct device *, bool); #else /* !CONFIG_PM_SLEEP */ static inline int acpi_pm_device_sleep_state(struct device *d, int *p) { if (p) *p = ACPI_STATE_D0; return ACPI_STATE_D3; } static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { return -ENODEV; } #endif /* !CONFIG_PM_SLEEP */ #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/actypes.h000066400000000000000000001033731314037446600240640ustar00rootroot00000000000000/****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /* * Copyright (C) 2000 - 2008, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of * 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /*! [Begin] no source code translation */ /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no common * 64-bit integer type across the various compilation models, as shown in * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and are * required because there is no C type that consistently represents the native * data width. ACPI_SIZE is needed because there is no guarantee that a * kernel-level C library is present. * * ACPI_SIZE 16/32/64-bit unsigned value * ACPI_NATIVE_INT 16/32/64-bit signed value * */ /******************************************************************************* * * Common types for all compilers, all targets * ******************************************************************************/ typedef unsigned char BOOLEAN; typedef unsigned char UINT8; typedef unsigned short UINT16; typedef COMPILER_DEPENDENT_UINT64 UINT64; typedef COMPILER_DEPENDENT_INT64 INT64; /*! 
[End] no source code translation !*/ /******************************************************************************* * * Types specific to 64-bit targets * ******************************************************************************/ #if ACPI_MACHINE_WIDTH == 64 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s64 acpi_native_int; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT64_MAX #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag * to indicate that special precautions must be taken to avoid alignment faults. * (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: Em64_t and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. */ #if defined (__IA64__) || defined (__ia64__) #define ACPI_MISALIGNMENT_NOT_SUPPORTED #endif /******************************************************************************* * * Types specific to 32-bit targets * ******************************************************************************/ #elif ACPI_MACHINE_WIDTH == 32 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s32 acpi_native_int; typedef u32 acpi_size; typedef u32 acpi_io_address; typedef u32 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the OS-specific header, and this will take precedence. * ******************************************************************************/ /* Value returned by acpi_os_get_thread_id */ #ifndef acpi_thread_id #define acpi_thread_id acpi_size #endif /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ #ifndef acpi_cpu_flags #define acpi_cpu_flags acpi_size #endif /* Object returned from acpi_os_create_cache */ #ifndef acpi_cache_t #ifdef ACPI_USE_LOCAL_CACHE #define acpi_cache_t struct acpi_memory_list #else #define acpi_cache_t void * #endif #endif /* * Synchronization objects - Mutexes, Semaphores, and spin_locks */ #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) /* * These macros are used if the host OS does not support a mutex object. * Map the OSL Mutex interfaces to binary semaphores. 
*/ #define acpi_mutex acpi_semaphore #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) #endif /* Configurable types for synchronization objects */ #ifndef acpi_spinlock #define acpi_spinlock void * #endif #ifndef acpi_semaphore #define acpi_semaphore void * #endif #ifndef acpi_mutex #define acpi_mutex void * #endif /******************************************************************************* * * Compiler-dependent types * * If the defaults below are not appropriate for the host compiler, they can * be defined in the compiler-specific header, and this will take precedence. * ******************************************************************************/ /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef acpi_uintptr_t #define acpi_uintptr_t void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA functions that are available to the rest of the kernel are * tagged with this macro which can be defined as appropriate for the host. */ #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(symbol) #endif /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) * *****************************************************************************/ /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ #define ACPI_MAX_GPE_BLOCKS 2 /* Default ACPI register widths */ #define ACPI_GPE_REGISTER_WIDTH 8 #define ACPI_PM1_REGISTER_WIDTH 16 #define ACPI_PM2_REGISTER_WIDTH 8 #define ACPI_PM_TIMER_WIDTH 32 /* Names within the namespace are 4 bytes long */ #define ACPI_NAME_SIZE 4 #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
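/*
 * Example: in a namespace path such as "\_SB_.PCI0.SBRG" each segment is
 * exactly ACPI_NAME_SIZE (4) characters, padded with '_' where needed, and
 * ACPI_PATH_SEGMENT_LENGTH (5) is one segment plus the '.' separator.
 */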
/* Sizes for ACPI table headers */ #define ACPI_OEM_ID_SIZE 6 #define ACPI_OEM_TABLE_ID_SIZE 8 /* ACPI/PNP hardware IDs */ #define PCI_ROOT_HID_STRING "PNP0A03" #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Miscellaneous types */ typedef u32 acpi_status; /* All ACPI Exceptions */ typedef u32 acpi_name; /* 4-byte ACPI name */ typedef char *acpi_string; /* Null terminated ASCII string */ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ /* Owner IDs are used to track namespace nodes for selective deletion */ typedef u8 acpi_owner_id; #define ACPI_OWNER_ID_MAX 0xFF struct uint64_struct { u32 lo; u32 hi; }; union uint64_overlay { u64 full; struct uint64_struct part; }; struct uint32_struct { u32 lo; u32 hi; }; /* * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI * version 2, integers are 64 bits. Note that this pertains to the ACPI integer * type only, not other integers used in the implementation of the ACPI CA * subsystem. */ typedef unsigned long long acpi_integer; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #if ACPI_MACHINE_WIDTH == 64 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ #endif #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /******************************************************************************* * * Commonly used macros * ******************************************************************************/ /* Data manipulation */ #define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) #define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) #define ACPI_LOWORD(integer) ((u16) (u32)(integer)) #define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) #define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) #define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) /* Size calculation */ #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) /* Pointer manipulation */ #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) 
NULL) #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) #else #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) #endif /******************************************************************************* * * Miscellaneous constants * ******************************************************************************/ /* * Initialization sequence */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 #define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 #define ACPI_NO_DEVICE_INIT 0x20 #define ACPI_NO_OBJECT_INIT 0x40 /* * Initialization state */ #define ACPI_SUBSYSTEM_INITIALIZE 0x01 #define ACPI_INITIALIZED_OK 0x02 /* * Power state values */ #define ACPI_STATE_UNKNOWN (u8) 0xFF #define ACPI_STATE_S0 (u8) 0 #define ACPI_STATE_S1 (u8) 1 #define ACPI_STATE_S2 (u8) 2 #define ACPI_STATE_S3 (u8) 3 #define ACPI_STATE_S4 (u8) 4 #define ACPI_STATE_S5 (u8) 5 #define ACPI_S_STATES_MAX ACPI_STATE_S5 #define ACPI_S_STATE_COUNT 6 #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 #define ACPI_STATE_D3 (u8) 3 #define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_D_STATE_COUNT 4 #define ACPI_STATE_C0 (u8) 0 #define ACPI_STATE_C1 (u8) 1 #define ACPI_STATE_C2 (u8) 2 #define ACPI_STATE_C3 (u8) 3 #define ACPI_C_STATES_MAX ACPI_STATE_C3 #define ACPI_C_STATE_COUNT 4 /* * Sleep type invalid value */ #define ACPI_SLEEP_TYPE_MAX 0x7 #define ACPI_SLEEP_TYPE_INVALID 0xFF /* * Standard notify values */ #define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 #define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 #define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 #define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 #define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 #define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 #define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 #define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 #define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 #define ACPI_NOTIFY_RESERVED (u8) 0x0A #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_MAX 0x0B /* * Types associated with ACPI names and objects. The first group of * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition * of the ACPI object_type() operator (See the ACPI Spec). Therefore, * only add to the first group if the spec changes. * * NOTE: Types must be kept in sync with the global acpi_ns_properties * and acpi_ns_type_names arrays. 
*/ typedef u32 acpi_object_type; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 /* * These are object types that do not map directly to the ACPI * object_type() operator. They are used for various internal purposes only. * If new predefined ACPI_TYPEs are added (via the ACPI specification), these * internal types must move upwards. (There is code that depends on these * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ /* * These are special object types that never appear in * a Namespace node, only in a union acpi_operand_object */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef u32 acpi_event_type; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). * +-------------+-+-+-+ * | Bits 31:3 |2|1|0| * +-------------+-+-+-+ * | | | | * | | | +- Enabled? * | | +--- Enabled for wake? * | +----- Set? 
* +----------- */ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 /* * General Purpose Events (GPE) */ #define ACPI_GPE_INVALID 0xFF #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 /* * GPE info flags - Per GPE * +-+-+-+---+---+-+ * |7|6|5|4:3|2:1|0| * +-+-+-+---+---+-+ * | | | | | | * | | | | | +--- Interrupt type: Edge or Level Triggered * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime * | | | +--- Type of dispatch -- to method, handler, or none * | | +--- Enabled for runtime? * | +--- Enabled for wake? * +--- Unused */ #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 #define ACPI_GPE_TYPE_MASK (u8) 0x06 #define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 #define ACPI_GPE_TYPE_WAKE (u8) 0x02 #define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ #define ACPI_GPE_DISPATCH_MASK (u8) 0x18 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ #define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 #define ACPI_GPE_RUN_ENABLED (u8) 0x20 #define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 #define ACPI_GPE_WAKE_ENABLED (u8) 0x40 #define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ /* * Flags for GPE and Lock interfaces */ #define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ #define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_MAX_SYS_NOTIFY 0x7f /* Address Space (Operation Region) Types */ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 #define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 #define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* * bit_register IDs * * These values are intended to be used by the hardware interfaces * and are mapped to individual bitfields defined within the ACPI * registers. See the acpi_gbl_bit_register_info global table in utglobal.c * for this mapping. 
*/ /* PM1 Status register */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 /* PM1 Enable register */ #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D /* PM1 Control register */ #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 #define ACPI_BITREG_SLEEP_TYPE 0x11 #define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ #define ACPI_BITREG_ARB_DISABLE 0x13 #define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* Status register values. A 1 clears a status bit. 0 = no effect */ #define ACPI_CLEAR_STATUS 1 /* Enable and Control register values */ #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ struct { acpi_object_type type; /* ACPI_TYPE_INTEGER */ acpi_integer value; /* The actual number */ } integer; struct { acpi_object_type type; /* ACPI_TYPE_STRING */ u32 length; /* # of bytes in string, excluding trailing null */ char *pointer; /* points to the string value */ } string; struct { acpi_object_type type; /* ACPI_TYPE_BUFFER */ u32 length; /* # of bytes in buffer */ u8 *pointer; /* points to the buffer */ } buffer; struct { acpi_object_type type; /* ACPI_TYPE_PACKAGE */ u32 count; /* # of elements in package */ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ } package; struct { acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ acpi_object_type actual_type; /* Type associated with the Handle */ acpi_handle handle; /* object reference */ } reference; struct { acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; /* ACPI_TYPE_POWER */ u32 system_level; u32 resource_order; } power_resource; }; /* * List of objects, used as a parameter list for control method evaluation */ struct acpi_object_list { u32 count; union acpi_object *pointer; }; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) #define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) struct acpi_buffer { acpi_size length; /* Length in bytes of the buffer */ void *pointer; /* pointer to buffer */ }; /* * name_type for acpi_get_name */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_NAME_TYPE_MAX 1 /* * Predefined Namespace items */ struct acpi_predefined_names { char *name; u8 type; char *val; }; /* * Structure and flags for acpi_get_system_info */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by acpi_get_system_info() */ struct acpi_system_info { u32 acpi_ca_version; u32 flags; u32 timer_resolution; u32 reserved1; u32 reserved2; u32 debug_level; 
u32 debug_layer; }; /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_NUM_TABLE_EVENTS 2 /* * Types specific to the OS service interfaces */ typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); /* * Various handlers and callback procedures */ typedef u32(*acpi_event_handler) (void *context); typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); #define ACPI_INIT_DEVICE_INI 1 typedef acpi_status(*acpi_exception_handler) (acpi_status aml_status, acpi_name name, u16 opcode, u32 aml_offset, void *context); /* Table Event handler (Load, load_table, etc.) and types */ typedef acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); /* Address Spaces (For Operation Regions) */ typedef acpi_status(*acpi_adr_space_handler) (u32 function, acpi_physical_address address, u32 bit_width, acpi_integer * value, void *handler_context, void *region_context); #define ACPI_DEFAULT_HANDLER NULL typedef acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, u32 function, void *handler_context, void **region_context); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ /* Length of UUID (string) values */ #define ACPI_UUID_LENGTH 16 /* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { u32 length; /* Length of string + null */ char *string; }; struct acpica_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ struct acpica_device_id ids[1]; /* ID array */ }; /* * Structure returned from acpi_get_object_info. 
* Optimized for both 32- and 64-bit builds */ struct acpi_device_info { u32 info_size; /* Size of info, including ID strings */ u32 name; /* ACPI object Name */ acpi_object_type type; /* ACPI object Type */ u8 param_count; /* If a method, required parameter count */ u8 valid; /* Indicates which optional fields are valid */ u8 flags; /* Miscellaneous info */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ u32 current_status; /* _STA value */ acpi_integer address; /* _ADR value */ struct acpica_device_id hardware_id; /* _HID value */ struct acpica_device_id unique_id; /* _UID value */ struct acpica_device_id_list compatible_id_list; /* _CID list */ }; /* Values for Flags field above (acpi_get_object_info) */ #define ACPI_PCI_ROOT_BRIDGE 0x01 /* Flags for Valid field above (acpi_get_object_info) */ #define ACPI_VALID_STA 0x01 #define ACPI_VALID_ADR 0x02 #define ACPI_VALID_HID 0x04 #define ACPI_VALID_UID 0x08 #define ACPI_VALID_CID 0x10 #define ACPI_VALID_SXDS 0x20 #define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 /* Context structs for address space handlers */ struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; acpi_physical_address mapped_physical_address; u8 *mapped_logical_address; acpi_size mapped_length; }; /* * struct acpi_memory_list is used only if the ACPICA local cache is enabled */ struct acpi_memory_list { char *list_name; void *list_head; u16 object_size; u16 max_depth; u16 current_depth; u16 link_offset; #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Statistics for debug memory tracking only */ u32 total_allocated; u32 total_freed; u32 max_occupied; u32 total_size; u32 current_total_size; u32 requests; u32 hits; #endif }; #endif /* __ACTYPES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/netfront.c000066400000000000000000001675551314037446600242620ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_bus.h" #include "actypes.h" struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN_no static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
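 * These pre-GSO kernels expose only the single NETIF_F_TSO flag (there is
 * no NETIF_F_GSO_MASK to clear); the gso_size/gso_segs defines and the
 * skb_is_gso()/skb_gso_ok() helpers below re-create just enough of the
 * newer GSO interface on top of tso_size/tso_segs.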
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *,int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ static int rtl8139_acpi_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { printk(KERN_INFO "acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) printk(KERN_INFO "cannot remove from acpi list\n"); return retval; } static void remove_rtl8139(void) { struct pci_dev *pdev = NULL; acpi_handle phandle = NULL; pdev = pci_get_device(0x10ec, 0x8139, pdev); if (pdev) { phandle = DEVICE_ACPI_HANDLE(&pdev->dev); printk(KERN_INFO "remove_rtl8139 : rtl8139 of subsystem(0x%2x,0x%2x) removed.\n", pdev->subsystem_vendor, pdev->subsystem_device); pci_disable_device(pdev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pdev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pdev); pci_remove_bus_device(pdev); #else pci_stop_and_remove_bus_device(pdev); #endif /* end check kernel version */ pci_dev_put(pdev); } if (phandle) { rtl8139_acpi_bus_trim(phandle); } return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
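 *
 * In outline: any emulated rtl8139 NIC is removed first so the PV
 * interface can take its place, create_netdev() allocates the
 * net_device together with its grant references, the MAC address is
 * read from the device's xenstore "mac" node (a string such as
 * "00:16:3e:12:34:56", shown here only as an example), and the
 * device plus its sysfs attributes are registered with the kernel.
 * Ring setup itself is deferred to talk_to_backend(), which runs
 * once the backend reaches the InitWait state.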
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove rtl8139 when xen nic devices appear */ remove_rtl8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal_uvp(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read_uvp(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal_uvp(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
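 *
 * setup_device() allocates one page for the TX ring and one for the
 * RX ring, initialises both with SHARED_RING_INIT() and
 * FRONT_RING_INIT(), grants the backend access to each page and
 * binds netif_int() to a newly allocated event channel.  The grant
 * references and event-channel port it produces are what the xenbus
 * transaction below advertises, retrying the whole transaction if it
 * ends with -EAGAIN:
 *
 *     tx-ring-ref     = grant reference of the TX ring page
 *     rx-ring-ref     = grant reference of the RX ring page
 *     event-channel   = port bound to netif_int()
 *     request-rx-copy = 1 when the copying receiver is in use
 *     feature-rx-notify, feature-no-csum-offload, feature-sg,
 *     feature-gso-tcpv4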
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start_uvp(&xbt); if (err) { xenbus_dev_fatal_uvp(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf_uvp(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf_uvp(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end_uvp(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal_uvp(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end_uvp(xbt, 1); xenbus_dev_fatal_uvp(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal_uvp(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal_uvp(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring_uvp(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
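 *
 * The handshake, roughly: the frontend sits in Initialising until
 * the backend reaches InitWait, at which point network_connect()
 * builds the rings and the frontend switches itself to Connected.
 * When the backend reports Connected we send a few fake ARPs so
 * that switches relearn our MAC (important after live migration),
 * and a Closing report from the backend closes the frontend.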
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate_uvp(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state_uvp(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed_uvp(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
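 *
 * The rmb() pairs with the write barrier the backend issues after
 * publishing rsp_prod, so every response slot up to 'prod' is
 * visible before the loop below reads it.  For each response we
 * check that the backend has really dropped the grant, end and
 * release the grant reference, return the id to the tx_skbs free
 * list and free the skb.  rsp_event is then re-armed at
 *
 *     prod + ((req_prod - prod) >> 1) + 1
 *
 * i.e. roughly halfway to the request producer, so the backend will
 * interrupt us again even when responses trickle in slowly.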
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access_uvp( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_uvp_ref_uvp(np->grant_tx_ref[id]); gnttab_release_grant_reference_uvp( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
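 *
 * The increase is multiplicative: if fewer than a quarter of
 * rx_target requests are still outstanding, the target doubles,
 * clamped to rx_max_target.  The matching linear decrease lives in
 * netif_poll(), so the fill target tracks the offered load without
 * oscillating.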
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference_uvp(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); if(!vaddr) { printk("vaddr is null\n"); } req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_uvp_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance_uvp_uvp(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
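 *
 * In the flip case the hypercalls that hand the pages over provide
 * the ordering; in the copy case the explicit wmb() is all that is
 * required.  The request producer is then published, and
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() only asks us to raise the
 * event channel when the backend has signalled, via req_event, that
 * it is waiting for more requests, which keeps notifications rare.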
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq_uvp(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_uvp_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference_uvp(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_uvp_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN_no if (skb->proto_data_valid) /* remote but checksummed? 
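 *
 * NETTXF_csum_blank tells the backend that the checksum still has
 * to be completed (locally generated CHECKSUM_PARTIAL skbs), while
 * NETTXF_data_validated only asserts that the payload has already
 * been verified and need not be checked again.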
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq_uvp(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, 
size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_uvp_ref_uvp(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); if(!vaddr) { printk("vaddr is null\n"); } mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_uvp_ref_uvp(ref); BUG_ON(!ret); } gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int xen_skb_checksum_setup(struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); break; default: if (net_ratelimit()) printk(KERN_ERR "Attempting to checksum a non-" "TCP/UDP packet, dropping a protocol" " %d packet", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). 
This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */ if (rx->flags & NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN_no skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance_uvp_uvp(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb->ip_summed == CHECKSUM_PARTIAL) { if (xen_skb_checksum_setup(skb)) { kfree_skb(skb); np->stats.rx_errors++; continue; } } /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
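 *
 * Concretely: when the number of outstanding RX requests still
 * exceeds three quarters of the target after a poll, the target
 * drops by one, never below rx_min_target; rx_min_target and
 * rx_max_target are themselves tunable at runtime through the
 * rxbuf_min and rxbuf_max sysfs attributes defined further down.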
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_uvp_ref_uvp(np->grant_tx_ref[i]); gnttab_release_grant_reference_uvp( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_uvp_ref_uvp(ref); gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page_uvp_uvp(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); if(!vaddr) { printk("vaddr is null\n"); } MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance_uvp_uvp(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
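 *
 * Each page recovered from the backend has to be wired back into
 * this domain: MULTI_update_va_mapping re-establishes the virtual
 * mapping, the MMU_MACHPHYS_UPDATE entries fix the machine-to-
 * physical (M2P) table, and set_phys_to_machine() updates the local
 * P2M array, undoing exactly what network_alloc_rx_buffers() did
 * when the page was flipped to the backend.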
*/ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_uvp_ref_uvp(ref)) { busy++; continue; } gnttab_release_grant_reference_uvp(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. 
*/ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf_uvp(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_uvp_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_uvp_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
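 *
 * This is also where the carrier is finally asserted, both on first
 * connect and after a resume or backend restart, so the stack
 * cannot queue packets at us before the rings and grants above
 * exist; the notify that follows nudges the backend in case the
 * requeued RX buffers are already waiting for it.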
*/ netfront_carrier_on(np); notify_remote_via_irq_uvp(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_reference_uvps(np->gref_tx_head); gnttab_free_grant_reference_uvps(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) 
device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } #ifndef DEBIAN5 static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; #endif static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references_uvp(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references_uvp(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } #ifdef DEBIAN5 netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->set_mac_address = xennet_set_mac_address; netdev->change_mtu = xennet_change_mtu; netif_napi_add(netdev, &np->napi, netif_poll, 64); #else netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); #endif netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_reference_uvps(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? 
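 *
 * The notifier fires for every inet address change in the system,
 * so we first check that the event is NETDEV_UP and that the
 * device's open routine is ours before sending three gratuitous ARP
 * requests and three replies; this refreshes switch and neighbour
 * caches when an address is configured on a VIF, for example right
 * after a live migration.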
*/ #ifdef DEBIAN5 if (event == NETDEV_UP && dev->open == network_open) #else if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) #endif { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler_uvp(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access_uvp(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN_no if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver_uvp(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu10/xen-vnif/netfront.h000066400000000000000000000210271314037446600242460ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. 
This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded_uvp(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. 
Must match the * one passed to netfront_accelerator_loaded_uvp() */ extern void netfront_accelerator_stop_uvp(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/000077500000000000000000000000001314037446600204315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/Makefile000066400000000000000000000017311314037446600220730ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Modify : songjiangtao 00202546 ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) COMPILEARGS = $(CROSSCOMPILE) obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ all: make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) rm -rf Module.symvers UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/config.mk000066400000000000000000000014431314037446600222310ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. 
we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) _XEN_CPPFLAGS += -include $(AUTOCONF) FEDORA8 = $(shell echo $(KERNDIR) | grep fc8) ifneq ("$(FEDORA8)","") _XEN_CPPFLAGS += -DFedora8 endif EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/000077500000000000000000000000001314037446600220545ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/000077500000000000000000000000001314037446600227715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/hypercall.h000066400000000000000000000001211314037446600251170ustar00rootroot00000000000000#ifdef __i386__ #include "hypercall_32.h" #else #include "hypercall_64.h" #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/hypercall_32.h000066400000000000000000000245711314037446600254420ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov hypercall_stubs,%%eax; " \ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "call *%%eax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3, __ign4, __ign5; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)), "4" ((long)(a4)), \ "5" ((long)(a5)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } static inline int __must_check 
HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { unsigned long timeout_hi = (unsigned long)(timeout>>32); unsigned long timeout_lo = (unsigned long)timeout; return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int 
__must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } static inline int __must_check HYPERVISOR_tmem_op( struct tmem_op *op) { return _hypercall1(int, tmem_op, op); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/hypercall_64.h000066400000000000000000000250321314037446600254400ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #include /* memcpy() */ #include #include #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #ifdef CONFIG_XEN #define HYPERCALL_STR(name) \ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)" #else #define HYPERCALL_STR(name) \ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\ "add hypercall_stubs(%%rip),%%rax; " \ "call *%%rax" #endif #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res) \ : \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, a1) \ ({ \ type __res; \ long __ign1; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1) \ : "1" ((long)(a1)) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ long __ign1, __ign2; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \ : "1" ((long)(a1)), "2" ((long)(a2)) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ long __ign1, __ign2, __ign3; \ register long __arg4 asm("r10") = (long)(a4); \ register long __arg5 asm("r8") = (long)(a5); \ asm volatile ( \ HYPERCALL_STR(name) \ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \ : "1" ((long)(a1)), "2" ((long)(a2)), \ "3" ((long)(a3)) \ : "memory" ); \ __res; \ }) static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } 
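/*
 * A minimal usage sketch (illustrative only; assumes the public version.h
 * constants such as XENVER_version are visible): each HYPERVISOR_* helper in
 * this file is simply an _hypercallN() expansion with the hypercall name and
 * typed arguments filled in, so a two-argument call places its arguments in
 * %rdi/%rsi and returns in %rax:
 *
 *	int ver = _hypercall2(int, xen_version, XENVER_version, NULL);
 *	// on success ver encodes (major << 16) | minor
 *
 * Callers should use the HYPERVISOR_xen_version() wrapper defined later in
 * this file rather than the raw macro; the sketch only shows how the macros
 * compose into the typed wrappers below.
 */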
static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_mca( struct xen_mc *mc_op) { mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; return _hypercall1(int, mca, mc_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; /* when hypercall memory_op failed, retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { return _hypercall3(int, grant_table_op, cmd, uop, count); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, 
cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } static inline int __must_check HYPERVISOR_tmem_op( struct tmem_op *op) { return _hypercall1(int, tmem_op, op); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/xen-mca.h000066400000000000000000000342111314037446600244730ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. 
Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. */ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. * Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. 
*/ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage Senario: After offlining broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easy broken page earlier when doing next reboot. 
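 *
 * A minimal consumer sketch (illustrative; "mi" is assumed to be a fetched
 * struct mc_info pointer obtained as described for XEN_MC_fetch below, and
 * record_bad_mfn() is a hypothetical Dom0 helper; the lookup macro and
 * recovery structures are defined later in this header):
 *
 *	struct mcinfo_common *mic;
 *	struct mcinfo_recovery *rec;
 *	x86_mcinfo_lookup(mic, mi, MC_TYPE_RECOVERY);
 *	rec = (struct mcinfo_recovery *)mic;
 *	if (rec != NULL && (rec->action_types & MC_ACTION_PAGE_OFFLINE))
 *		record_bad_mfn(rec->action_info.page_retire.mfn);	// hypothetical helper
 *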
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t _pad0; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). */ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. 
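 *
 * A minimal fetch sketch (illustrative; assumes a build that provides the
 * HYPERVISOR_mca() wrapper, a caller-allocated struct mc_info pointed to by
 * "mi", and a caller-defined int "rc"):
 *
 *	struct xen_mc mc = { .cmd = XEN_MC_fetch };
 *	mc.u.mc_fetch.flags = XEN_MC_NONURGENT;
 *	set_xen_guest_handle(mc.u.mc_fetch.data, mi);
 *	rc = HYPERVISOR_mca(&mc);
 *	// on return mc.u.mc_fetch.flags carries XEN_MC_OK / XEN_MC_NODATA etc.,
 *	// and mc.u.mc_fetch.fetch_id can be echoed back with XEN_MC_ACK set.
 *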
*/ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/xen-x86_32.h000066400000000000000000000145401314037446600246670ustar00rootroot00000000000000/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; int $0x82 */ #define TRAP_INSTR "int $0x82" #endif /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle #define set_xen_guest_handle(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. */ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/xen-x86_64.h000066400000000000000000000157351314037446600247030ustar00rootroot00000000000000/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 /* * Legacy hypercall interface: * As above, except the entry sequence to the hypervisor is: * mov $hypercall-number*32,%eax ; syscall * Clobbered: %rcx, %r11, argument registers (as above) */ #define TRAP_INSTR "syscall" #endif /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. */ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. 
* All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/arch/xen.h000066400000000000000000000170051314037446600237370ustar00rootroot00000000000000/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004-2006, K A Fraser */ #include "../public/xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #if defined(__i386__) #include #elif defined(__x86_64__) #include #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif /* * SEGMENT DESCRIPTOR TABLES */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* Maximum number of virtual CPUs in multi-processor guests. */ #define MAX_VIRT_CPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; /* * Send an array of these to HYPERVISOR_set_trap_table(). * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. */ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/balloon/000077500000000000000000000000001314037446600235025ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/balloon/balloon.h000066400000000000000000000046071314037446600253100ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_BALLOON_H__ #define __ASM_BALLOON_H__ /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif /* __ASM_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/interface/000077500000000000000000000000001314037446600240145ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/interface/features.h000066400000000000000000000055731314037446600260150ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __INTERFACE_FEATURES_H__ #define __INTERFACE_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/interface/grant_table.h000066400000000000000000000407171314037446600264600ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry grant_entry_t; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. 
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. 
*/ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ XEN_GUEST_HANDLE(ulong) frame_list; }; typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 typedef struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; } gnttab_copy_t; DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. 
*/ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_and_replace 7 struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. 
*/ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/interface/xencomm.h000066400000000000000000000032131314037446600256320ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. */ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/000077500000000000000000000000001314037446600233325ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/common/000077500000000000000000000000001314037446600246225ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/common/xenbus.h000066400000000000000000000037411314037446600263040ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Interface to /proc/xen/xenbus. 
* * Copyright (c) 2008, Diego Ongaro * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_XENBUS_H__ #define __LINUX_PUBLIC_XENBUS_H__ #include #ifndef __user #define __user #endif typedef struct xenbus_alloc { domid_t dom; __u32 port; __u32 grant_ref; } xenbus_alloc_t; /* * @cmd: IOCTL_XENBUS_ALLOC * @arg: &xenbus_alloc_t * Return: 0, or -1 for error */ #define IOCTL_XENBUS_ALLOC \ _IOC(_IOC_NONE, 'X', 0, sizeof(xenbus_alloc_t)) #endif /* __LINUX_PUBLIC_XENBUS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/event_channel.h000066400000000000000000000213471314037446600263230ustar00rootroot00000000000000/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include "xen.h" /* * Prototype for this hypercall is: * int event_channel_op(int cmd, void *args) * @cmd == EVTCHNOP_??? (event-channel operation). * @args == Operation-specific extra arguments (NULL if none). 
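 *
 * As an illustration only (not part of this interface), a guest kernel that
 * has the usual HYPERVISOR_event_channel_op() wrapper available (as used by
 * the PV driver code elsewhere in this package) might invoke one of the
 * operations defined below as follows; 'remote_dom' and 'local_port' are
 * hypothetical local variables:
 *
 *     struct evtchn_alloc_unbound alloc = {
 *         .dom        = DOMID_SELF,
 *         .remote_dom = remote_dom,
 *     };
 *     if (HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc) == 0)
 *         local_port = alloc.port;   - alloc.port now accepts an interdomain
 *                                      binding from remote_dom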
*/ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. 
*/ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ #define EVTCHNOP_reset 10 struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * Argument to event_channel_op_compat() hypercall. Superceded by new * event_channel_op() hypercall since 0x00030202. */ struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/evtchn.h000066400000000000000000000112251314037446600247730ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. 
* * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include /* * pv drivers header files */ #include "hypervisor.h" #include "synch_bitops.h" #include "event_channel.h" /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. */ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void irq_resume(void); /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int irq_to_evtchn_port(int irq); #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/features.h000066400000000000000000000007561314037446600253310ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __FEATURES_H__ #define __FEATURES_H__ #include #include extern void setup_xen_features(void); //void setup_xen_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; #define xen_feature(flag) (xen_features[flag]) #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/gnttab.h000066400000000000000000000125041314037446600247640ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include /* * pv driver header files */ #include #include #include #include /* maddr_t */ struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); int gnttab_suspend(void); int gnttab_resume(void); void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ 
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/grant_table.h000066400000000000000000000101301314037446600257600ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ //int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
*/ //void gnttab_end_foreign_access(grant_ref_t ref, int readonly, // unsigned long page); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/hvm.h000066400000000000000000000006751314037446600243050ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/hvm_op.h000066400000000000000000000112741314037446600250000ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ #include "xen.h" /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. 
*/ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. */ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ hvmmem_type_t hvmmem_type; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. 
*/ uint64_aligned_t nr; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/hypercall.h000066400000000000000000000013761314037446600254750ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/hypervisor.h000066400000000000000000000161051314037446600257200ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. * * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #include #if defined(__i386__) #ifdef CONFIG_X86_PAE #include "pgtable-nopud.h" #else #include "pgtable-nopmd.h" #endif #elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #include "pgtable-nopud.h" #endif extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(unsigned long ptr); void xen_new_user_pt(unsigned long ptr); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(unsigned long ptr); void xen_pgd_unpin(unsigned long ptr); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #else #define xen_tlb_flush_all xen_tlb_flush #define xen_invlpg_all xen_invlpg #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. 
*/ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #endif #include #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void /*__noreturn*/ HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/io/000077500000000000000000000000001314037446600237415ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/io/ring.h000066400000000000000000000351261314037446600250600ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). 
To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t netfront_smartpoll_active; \ uint8_t pad[47]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. 
*/ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. 
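The notification hold-off protocol described above is what a front-end driver follows when it kicks the back end. Here is a minimal sketch of the producer side, reusing the mytag/request_t naming from the earlier comment; notify_remote_via_irq() stands in for whatever event-channel notification the surrounding driver uses and is an assumption, not something defined in this header.

/* Sketch: queue one request on a front ring and notify only when the
 * consumer asked for it (see the hold-off description above). */
static void queue_one_request(mytag_front_ring_t *ring, int irq,
                              const request_t *src)
{
    request_t *req;
    int notify;

    /* Caller is expected to have checked !RING_FULL(ring) first. */
    req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
    *req = *src;
    ring->req_prod_pvt++;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        notify_remote_via_irq(irq);
}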
*/ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/io/xenbus.h000066400000000000000000000046401314037446600254220ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* * The state of either end of the Xenbus, i.e. the current communication * status of initialisation across the bus. States here imply nothing about * the state of the connection between the driver and the kernel's device * layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, /* * InitWait: Finished early initialisation but waiting for information * from the peer or hotplug scripts. */ XenbusStateInitWait = 2, /* * Initialised: Waiting for a connection from the peer. 
*/ XenbusStateInitialised = 3, XenbusStateConnected = 4, /* * Closing: The device is being closed due to an error or an unplug event. */ XenbusStateClosing = 5, XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/ioreq.h000066400000000000000000000105071314037446600246250ustar00rootroot00000000000000/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t size; /* size in bytes */ uint64_t count; /* for rep prefixes */ uint64_t data; /* data (or paddr of data) */ uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. */ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t pad:1; uint8_t type; /* I/O type */ uint8_t _pad0[6]; uint64_t io_count; /* How many IO done on a vcpu */ }; typedef struct ioreq ioreq_t; struct vcpu_iodata { struct ioreq vp_ioreq; /* Event channel port, used for notifications to/from the device model. */ uint32_t vp_eport; uint32_t _pad0; }; typedef struct vcpu_iodata vcpu_iodata_t; struct shared_iopage { struct vcpu_iodata vcpu_iodata[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. 
If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ #define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40 #define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08) #define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20) #define ACPI_GPE0_BLK_LEN 0x08 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/maddr.h000066400000000000000000000114501314037446600245730ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. 
These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/memory.h000066400000000000000000000233031314037446600250140ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
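The conversion helpers that maddr.h defines above (pfn_to_mfn(), virt_to_machine() and friends) are what PV code uses whenever Xen needs machine rather than pseudo-physical addresses. A short sketch, assuming 'buf' lies in the kernel's direct mapping so the __pa()-based macros are valid; the printk is only there to show the resulting values.

/* Sketch: translate a directly-mapped kernel buffer into the machine frame
 * number and machine address Xen expects, using the maddr.h helpers above. */
static void describe_buffer(void *buf)
{
    unsigned long mfn = virt_to_mfn(buf);         /* machine frame number */
    maddr_t maddr     = virt_to_machine(buf);     /* full machine address */
    unsigned long pfn = mfn_to_local_pfn(mfn);    /* back to pseudo-phys  */

    printk(KERN_DEBUG "buf %p: pfn %#lx mfn %#lx maddr %#llx\n",
           buf, pfn, mfn, (unsigned long long)maddr);
}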
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include "xen.h" /* * Increase or decrease the specified domain's memory reservation. Returns the * number of extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. */ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. */ unsigned int mem_flags; #else unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. 
* 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ unsigned int space; /* Index into source mapping space. */ xen_ulong_t idx; /* GPFN where the source mapping page should appear. */ xen_pfn_t gpfn; }; typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of xen_memory_map_t. 
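XENMEM_decrease_reservation with a struct xen_memory_reservation is the basic building block of a balloon driver: the guest hands frames back to Xen. A minimal sketch for a single page follows; HYPERVISOR_memory_op() is the usual wrapper for the XENMEM_* commands and lives in the per-architecture hypercall headers rather than in this file, so treat its use here as an assumption.

/* Sketch: return one guest frame to Xen.  The hypercall returns the number
 * of extents actually freed. */
static int release_one_frame(xen_pfn_t gmfn)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = 1,
        .extent_order = 0,          /* a single page */
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(reservation.extent_start, &gmfn);

    return HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                &reservation) == 1 ? 0 : -EBUSY;
}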
*/ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of xen_memory_map_t. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/nmi.h000066400000000000000000000053061314037446600242720ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ #include "xen.h" /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. 
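Registering the NMI callback described below is a dom0/VCPU0-only operation. This sketch assumes HYPERVISOR_nmi_op() as the hypercall wrapper (the name used by the classic Xen Linux hypercall headers, not defined in this file) and a low-level assembly entry point nmi_entry, which is likewise hypothetical.

/* Sketch: register an NMI handler for VCPU0 of domain 0. */
extern void nmi_entry(void);    /* assumed low-level NMI trampoline */

static int register_nmi_handler(void)
{
    struct xennmi_callback cb = {
        .handler_address = (unsigned long)nmi_entry,
    };

    return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
}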
*/ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. */ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/params.h000066400000000000000000000076431314037446600250000ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. 
Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/pgtable-nopmd.h000066400000000000000000000005611314037446600262360ustar00rootroot00000000000000#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/pgtable-nopud.h000066400000000000000000000006211314037446600262430ustar00rootroot00000000000000#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/physdev.h000066400000000000000000000171151314037446600251720ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ #include "xen.h" /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? 
(physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn 17 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t *bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. 
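PHYSDEVOP_eoi, defined above, is how a guest with a passed-through interrupt tells Xen that the line has been serviced. A short sketch; HYPERVISOR_physdev_op() is the standard wrapper for these commands and is declared in the hypercall headers rather than here.

/* Sketch: signal end-of-interrupt for a PIRQ back to Xen. */
static void irq_ack_to_xen(uint32_t irq)
{
    struct physdev_eoi eoi = { .irq = irq };

    if (HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi))
        printk(KERN_WARNING "PHYSDEVOP_eoi failed for irq %u\n", irq);
}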
*/ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN */ int index; /* IN or OUT */ int pirq; /* IN */ int bus; /* IN */ int devfn; /* IN */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. */ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/platform-compat.h000066400000000000000000000127371314037446600266220ustar00rootroot00000000000000#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include /* * pv drivers header files */ #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif //#if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ //#if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) //#if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) //typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); //#endif //#endif //#endif //#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) //#define setup_xen_features xen_setup_features //#endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/platform.h000066400000000000000000000301271314037446600253320ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. 
* On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). */ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. 
*/ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ uint32_t sleep_state; /* Which state to enter (Sn). */ uint32_t flags; /* Must be zero. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. */ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... 
*/ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ } u; }; typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/protocols.h000066400000000000000000000032101314037446600255230ustar00rootroot00000000000000/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated 
documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/sched.h000066400000000000000000000104321314037446600245710ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * This legacy version is available to new guests as sched_op_compat(). */ /* * Voluntarily yield the CPU. * @arg == NULL. 
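 *
 * Purely as an illustration (the HYPERVISOR_sched_op() wrapper is provided by
 * the guest OS hypercall headers, not by this file), a cooperative yield using
 * the new-style two-argument hypercall described above would look like:
 *
 *   (void)HYPERVISOR_sched_op(SCHEDOP_yield, NULL);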
*/ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_xxx reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #endif /* __XEN_PUBLIC_SCHED_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/synch_bitops.h000066400000000000000000000062341314037446600262140ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
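 *
 * Illustrative use only (the names 'flags' and BIT_BUSY are examples, not part
 * of this header): claiming and releasing a bit in a word shared with another
 * CPU or domain might look like
 *
 *   static unsigned long flags;
 *   #define BIT_BUSY 0
 *   if (!synch_test_and_set_bit(BIT_BUSY, &flags)) {
 *       ... we own the bit; do the work ...
 *       synch_clear_bit(BIT_BUSY, &flags);
 *   }
 *
 * All of these operations are LOCK-prefixed, so the update is ordered with
 * respect to the peer's accesses.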
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/tmem.h000066400000000000000000000067431314037446600244570ustar00rootroot00000000000000/****************************************************************************** * tmem.h * * Guest OS interface to Xen Transcendent Memory. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_TMEM_H__ #define __XEN_PUBLIC_TMEM_H__ #include "xen.h" /* Commands to HYPERVISOR_tmem_op() */ #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_NEW_PAGE 3 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 /* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ #define TMEMC_THAW 0 #define TMEMC_FREEZE 1 #define TMEMC_FLUSH 2 #define TMEMC_DESTROY 3 #define TMEMC_LIST 4 #define TMEMC_SET_WEIGHT 5 #define TMEMC_SET_CAP 6 #define TMEMC_SET_COMPRESS 7 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_POOL_PAGESIZE_MASK 0xf #define TMEM_POOL_VERSION_SHIFT 24 #define TMEM_POOL_VERSION_MASK 0xff /* Special errno values */ #define EFROZEN 1000 #define EEMPTY 1001 #ifndef __ASSEMBLY__ typedef xen_pfn_t tmem_cli_mfn_t; typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; struct tmem_op { uint32_t cmd; int32_t pool_id; /* private > 0; shared < 0; 0 is invalid */ union { struct { /* for cmd == TMEM_NEW_POOL */ uint64_t uuid[2]; uint32_t flags; } new; struct { /* for cmd == TMEM_CONTROL */ uint32_t subop; uint32_t cli_id; uint32_t arg1; uint32_t arg2; tmem_cli_va_t buf; } ctrl; struct { uint64_t object; uint32_t index; uint32_t tmem_offset; uint32_t pfn_offset; uint32_t len; tmem_cli_mfn_t cmfn; /* client machine page frame */ } gen; } u; }; typedef struct tmem_op tmem_op_t; DEFINE_XEN_GUEST_HANDLE(tmem_op_t); #endif #endif /* __XEN_PUBLIC_TMEM_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/version.h000066400000000000000000000060571314037446600252000ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. 
All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #define XENVER_commandline 9 typedef char xen_commandline_t[1024]; #endif /* __XEN_PUBLIC_VERSION_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/xen-compat.h000066400000000000000000000036361314037446600255660ustar00rootroot00000000000000/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. 
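 *
 * As a build-time illustration (exact flags depend on the guest's build
 * system), a guest wanting the newest interface it understands can define the
 * version itself before including any Xen public header, e.g.
 *
 *   #define __XEN_INTERFACE_VERSION__ 0x00030209
 *
 * or equivalently pass -D__XEN_INTERFACE_VERSION__=0x00030209 to the
 * compiler; otherwise the fallback below selects the legacy interface.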
*/ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/xen.h000066400000000000000000000624451314037446600243100ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include #if defined(__i386__) || defined(__x86_64__) #include #elif defined(__ia64__) #include #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. 
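 * A guest handle wraps a guest pointer for use in hypercall argument
 * structures; its exact representation is chosen by the arch-specific headers
 * included above. As an illustration only (set_xen_guest_handle() comes from
 * those arch headers, and the names 'h' and 'vals' are examples):
 *
 *   XEN_GUEST_HANDLE(int) h;
 *   int vals[4];
 *   set_xen_guest_handle(h, vals);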
*/ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); #endif /* * HYPERCALLS */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 #define __HYPERVISOR_tmem_op 38 /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 #undef __HYPERVISOR_sched_op #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. */ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. 
(DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. 
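 *
 * Illustrative invocation only (HYPERVISOR_mmuext_op() itself is declared in
 * the guest OS hypercall headers, not in this file): a local TLB flush could
 * be requested with
 *
 *   struct mmuext_op op;
 *   op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *   (void)HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
 *
 * where the third argument may point to a success count and the fourth names
 * the foreign domain (DOMID_SELF for none), as described above.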
*/ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* DOMID_INVALID is used to identity invalid domid */ #define DOMID_INVALID (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. 
The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op, result; unsigned long args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed by an * increment of 'version'. The guest can therefore detect updates by * looking for changes to 'version'. If the least-significant bit of * the version number is set then an update is in progress and the guest * must wait to read a consistent set of values. * The correct way to interact with the version number is similar to * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32) * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct vcpu_time_info time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * Xen/kernel shared data -- pointer provided in start_info. 
* * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */ struct arch_shared_info arch; }; #ifndef __XEN__ typedef struct shared_info shared_info_t; #endif /* * Start-of-day memory layout: * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region ends on an aligned 4MB boundary. * 3. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * (unless relocated due to XEN_ELFNOTE_INIT_P2M) * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 4. Bootstrap elements are packed together, but each is 4kB-aligned. * 5. The initial ram disk may be omitted. * 6. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 7. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 8. 
There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ xen_pfn_t store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { xen_pfn_t mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. */ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). */ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. 
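 * For example, mk_unsigned_long(0x7FF0) expands to 0x7FF0UL when compiled as
 * C, while the assembly variant below leaves it as a bare 0x7FF0.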
*/ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #endif #endif /* __XEN_PUBLIC_XEN_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/xen_proc.h000066400000000000000000000004021314037446600253140ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/xenbus.h000066400000000000000000000255601314037446600250170ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); /* See XBWF_ definitions below. */ unsigned long flags; }; /* * Execute callback in its own kthread. 
Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int xenbus_register_frontend(struct xenbus_driver *drv); int xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. 
*/ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). 
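 *
 * Illustrative pairing only (error handling abbreviated; 'area' and 'gnt_ref'
 * are example names): a driver that mapped a peer's ring with
 *
 *   struct vm_struct *area = xenbus_map_ring_valloc(dev, gnt_ref);
 *
 * and found the result valid would access it through area->addr and later
 * release both the mapping and the virtual address range with
 *
 *   xenbus_unmap_ring_vfree(dev, area);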
*/ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/public/xs_wire.h000066400000000000000000000070141314037446600251650ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. 
 */
*/ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vbd/000077500000000000000000000000001314037446600226275ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vbd/blkif.h000066400000000000000000000134061314037446600240730ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include #include /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). 
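 *
 * For example, a frontend would typically use the hold-off macros from
 * ring.h when kicking the backend (sketch only; "ring" and "irq" stand for
 * the caller's own front-ring state and event-channel irq):
 *
 *	int notify;
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);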
* * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* * NB. first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "sector-size" node in the backend * xenbus info. Also the xenbus "sectors" node is expressed in 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. 
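 *
 * The DEFINE_RING_TYPES() invocation below expands, via the generic ring.h
 * macros, to roughly the following types:
 *	struct blkif_sring   - shared-page ring layout (requests/responses)
 *	blkif_front_ring_t   - frontend-private producer/consumer bookkeeping
 *	blkif_back_ring_t    - backend-private producer/consumer bookkeeping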
*/ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vnif/000077500000000000000000000000001314037446600230165ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vnif/netif.h000066400000000000000000000163561314037446600243070ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include #include /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... * Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. 
*/ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For example, for TCP this * is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of the packet and any * extra features required to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO features required * to process this packet, such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via 'feature-multicast-control' * xenbus node containing value '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value '1'. * If multicast control is requested then multicast flooding is * disabled and the frontend must explicitly register its interest * in multicast groups using dummy transmit requests containing * MCAST_{ADD,DEL} extra-info fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
*/ #define NETIF_RSP_NULL 1 #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/000077500000000000000000000000001314037446600230155ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/driver_util.h000066400000000000000000000005421314037446600255170ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(size_t size); extern void free_vm_area(struct vm_struct *area); extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/gnttab_dma.h000066400000000000000000000026471314037446600252770ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/platform-pci.h000066400000000000000000000026611314037446600255700ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/xenbus_comms.h000066400000000000000000000043341314037446600256740ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/include/vpci/xenbus_probe.h000066400000000000000000000061171314037446600256660ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; struct device dev; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void dev_changed(const char *node, struct xen_bus_type *bus); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/version.ini000066400000000000000000000000641314037446600226170ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/000077500000000000000000000000001314037446600226475ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/Kbuild000066400000000000000000000001511314037446600240010ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o xen-balloon-objs := balloon.o xen-balloon-objs += sysfs.o 
UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/Makefile000066400000000000000000000000661314037446600243110ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/balloon.c000066400000000000000000000500051314037446600244410ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv driver header file */ #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; /*added by linyuliang 00176349 begin */ #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. 
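 *
 * Concretely, once totalram_bias has been established the driver keeps
 *
 *	totalram_pages = bs.current_pages - totalram_bias
 *
 * up to date on every reservation change (see increase_reservation() and
 * decrease_reservation() below).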
*/ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /*added by linyuliang 00176349 end */ #ifndef MODULE extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); #else static void balloon_process(void *unused); static DECLARE_WORK(balloon_worker, balloon_process, NULL); #endif static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_cold_page(page); #else /* free_cold_page() is not being exported. 
*/ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } /*modified by linyuliang 00176349 begin */ unsigned long balloon_minimum_target(void) /*modified by linyuliang 00176349 end */ { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /*modified by linyuliang 00176349 begin */ totalram_pages = bs.current_pages - totalram_bias; /*modified by linyuliang 00176349 end */ out: balloon_unlock(flags); return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /*modified by linyuliang 00176349 begin */ totalram_pages = bs.current_pages - totalram_bias; /*modified by linyuliang 00176349 end */ balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! then write current memory to xenstore*/ static int flag; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void balloon_process(struct work_struct *unused) #else static void balloon_process(void *unused) #endif { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 
0:1; /*modified by linyuliang 00176349 end */ // IPRINTK("b_set_new_tar_bs.target_pages= %lu",bs.target_pages); schedule_work(&balloon_worker); } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*added by linyuliang 00176349 begin */ #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? 
XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } /*added by linyuliang 00176349 end */ #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
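 * Without CONFIG_XEN the page cannot be unhooked from the page tables here,
 * so report failure (ret != 0) and let the error path below hand the page
 * back to the allocator.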
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); balloon_free_page(page); goto err; } /*added by linyuliang 00176349 begin */ totalram_pages = --bs.current_pages - totalram_bias; /*added by linyuliang 00176349 end */ balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); kfree(pagevec); schedule_work(&balloon_worker); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/common.h000066400000000000000000000043201314037446600243070ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. 
*/ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-balloon/sysfs.c000066400000000000000000000111161314037446600241620ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, const char *buf, size_t count) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strcpy(memstring, buf); target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { set_kset_name(BALLOON_CLASS_NAME), }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/000077500000000000000000000000001314037446600236165ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/Kbuild000066400000000000000000000017051314037446600247560ustar00rootroot00000000000000########################################################### ### Author: King ### ########################################################### include $(M)/config.mk obj-m := xen-platform-pci.o xen-platform-pci-objs += evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += 
xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o modules: $(MAKE) -C $(KERNELDIR) M=$(PWD) modules ########################################################### ### END ### ########################################################### UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/Makefile000066400000000000000000000000661314037446600252600ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/evtchn.c000066400000000000000000000231571314037446600252610ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } #else int bind_listening_port_to_irqhandler( unsigned int remote_domain, irqreturn_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } #endif EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; 
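			/* Only ports this driver allocated itself
			 * (irq_evtchn[irq].close != 0) are handed back to Xen
			 * here; caller-provided ports bound via
			 * bind_caller_port_to_irqhandler() are merely masked
			 * above. */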
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | 
SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/features.c000066400000000000000000000015371314037446600256060ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include /* * pv drivers header files */ #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */ EXPORT_SYMBOL(xen_features); void setup_xen_features(void) { xen_feature_info_t fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j=0; j<32; j++) xen_features[i*32+j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; 
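/*
 * The frame (above) and domid (below) must be in place before the flags
 * are written; the wmb() orders these stores so the granted domain can
 * never observe GTF_permit_access on a half-initialised entry.
 */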
shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
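 * The rmb() that follows enforces this ordering, so the frame cannot be
 * read ahead of the completion status checked above.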
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; 
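/*
 * Illustrative sketch only, not part of this driver: the usual granting
 * pattern built on the public interface above.  The names backend_domid
 * and page are hypothetical.
 *
 *   unsigned long frame = virt_to_mfn(page_address(page));
 *   int ref = gnttab_grant_foreign_access(backend_domid, frame, 0);
 *   if (ref < 0)
 *           return ref;                  // -ENOSPC: no free grant entries
 *   // ... advertise ref to the backend, let it map the page, do I/O ...
 *   gnttab_end_foreign_access(ref, 0);   // revoke; 0 means keep the page
 */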
check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN static DEFINE_SEQLOCK(gnttab_dma_lock); #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); memcpy(new_addr, addr, PAGE_SIZE); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
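 * page_mapped() is the test used below: __gnttab_dma_map_page() pins
 * foreign pages by raising their mapcount, so a page that is still mapped
 * may be in the middle of a DMA operation and must not be replaced here.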
*/ if (unlikely(page_mapped(page))) { write_sequnlock(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status); write_sequnlock(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return -ENOSYS; return gnttab_map(0, nr_grant_frames - 1); } int gnttab_suspend(void) { #ifdef CONFIG_X86 apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); #endif return 0; } #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { printk("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } int __devinit gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/machine_reboot.c000066400000000000000000000043521314037446600267440ustar00rootroot00000000000000#include #include /* * pv drivers header files */ #include #include #include #include #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
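 * Each AP bumps nr_spinning and then busy-waits with interrupts disabled
 * until the boot CPU clears do_spin once HYPERVISOR_suspend() has returned.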
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled){ xenbus_resume(); } else xenbus_suspend_cancel(); // update uvp flags write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/panic-handler.c000066400000000000000000000017221314037446600264710ustar00rootroot00000000000000#include #include #include /* * pv drivers header files */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
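 * (HYPERVISOR_shutdown(SHUTDOWN_crash) does not return.)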
*/ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/platform-compat.c000066400000000000000000000070241314037446600270720ustar00rootroot00000000000000#include #include #include #include #include /* * pv drivers header files */ #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) 
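/* Two-pass formatting: measure with vsnprintf() first, then allocate len + 1 bytes and print for real. */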
{ va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/platform-pci-unplug.c000066400000000000000000000117421314037446600276740ustar00rootroot00000000000000/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. 
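 * (In this build the check is omitted: the code below always enables the
 * platform PCI driver and always requests NIC and IDE-disk unplug.)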
* Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/platform-pci.c000066400000000000000000000241011314037446600263550ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #ifdef __ia64__ #include #endif #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
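 * (__PAGE_KERNEL with _PAGE_NX cleared yields an executable mapping; the
 * wrmsrl() loop below then tells Xen where to write each hypercall page.)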
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret 
= xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/reboot.c000066400000000000000000000211461314037446600252600ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Was last suspend request cancelled? */ static int suspend_cancelled; /* Can we leave APs online when we suspend? 
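 * Decided by the xenstore key control/platform-feature-multiprocessor-suspend,
 * read into fast_suspend by setup_shutdown_watcher().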
*/ static int fast_suspend; /* * modified by King @ 2011-01-26 */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); #else static void __shutdown_handler(void *unused); static DECLARE_WORK(shutdown_work, __shutdown_handler); #endif /*********************************************************/ static int setup_suspend_evtchn(void); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) schedule_delayed_work(&shutdown_work, 0); #else schedule_work(&shutdown_work); #endif break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) schedule_delayed_work(&shutdown_work, 0); #else schedule_work(&shutdown_work); #endif else BUG_ON(old_state != SHUTDOWN_RESUMING); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } #else static void __shutdown_handler(void *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? 
xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } #endif static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static irqreturn_t suspend_int(int irq, void* dev_id) #else static irqreturn_t suspend_int(int irq, void* dev_id, struct pt_regs *ptregs) #endif { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; 
} static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xen_proc.c000066400000000000000000000011111314037446600255710ustar00rootroot00000000000000#include #include /* * pv drivers header files */ #include static struct proc_dir_entry *xen_base; struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } EXPORT_SYMBOL_GPL(create_xen_proc_entry); void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } EXPORT_SYMBOL_GPL(remove_xen_proc_entry); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xen_support.c000066400000000000000000000041241314037446600263510ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xenbus_client.c000066400000000000000000000175071314037446600266360ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DPRINTK(fmt, args...) \ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args) const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. 
Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { printk("xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) 
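/* Same reporting path as xenbus_dev_error(), but additionally switches the device to XenbusStateClosing. */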
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xenbus_comms.c000066400000000000000000000151161314037446600264700ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) extern void xenbus_probe(struct work_struct *); #else extern void xenbus_probe(void *); #endif static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); //static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) static irqreturn_t wake_waiting(int irq, void *unused) { int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. 
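The rmb() below pairs with the producer's write barrier: the response bytes are only copied out once the rsp_prod update has been observed.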
*/ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /* Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xenbus_dev.c000066400000000000000000000253331314037446600261320ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
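Replies and watch events are appended here by queue_reply() and drained by xenbus_dev_read().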
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); 
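/* Record which connection owns this watch so that fired events can be queued back onto its read_buffers list. */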
watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef HAVE_UNLOCKED_IOCTL static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { extern int xenbus_conn(domid_t remote_dom, int *grant_ref, evtchn_port_t *local_port); void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, 
XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, #ifdef HAVE_UNLOCKED_IOCTL .unlocked_ioctl = xenbus_dev_ioctl #endif }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xenbus_probe.c000066400000000000000000000764551314037446600264760ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include /* * pv driver header files */ #include #include #include #include #include #include #include #ifdef MODULE #include #endif #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
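The id table is terminated by an entry whose devicetype is the empty string.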
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #else static int xenbus_uevent_frontend(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct xenbus_device *xdev; int length = 0, i = 0; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. 
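Frontend nodes live at device/<type>/<id> in xenstore, hence root "device", two path levels and the frontend_bus_id() mapping of the node name to a bus id.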
*/ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, .dev = { .bus_id = "xen", }, }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { printk(KERN_WARNING "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define 
XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = drv->owner; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
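That is, the character immediately after the matched prefix must be a '/' separator.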
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); xendev->dev.parent = &bus->dev; xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. 
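Once registered the device belongs to the driver core; the attribute files created below expose its nodename, devtype and modalias.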
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto unregister; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto unregister; return 0; unregister: device_remove_file(&xendev->dev, &dev_attr_nodename); device_remove_file(&xendev->dev, &dev_attr_devtype); device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
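fe_watch covers the whole "device" subtree; frontend_changed() forwards every event to dev_changed() on the frontend bus.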
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) void xenbus_probe(struct work_struct *unused) #else void xenbus_probe(void *unused) #endif { BUG_ON(!is_xenstored_ready()); /* Enumerate devices in xenstore and watch for changes. 
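Entries that already exist are probed first, then fe_watch is registered so that later hotplug events are also seen.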
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; close.port = port; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); #endif rc = xb_free_port(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = xb_free_port(xen_store_evtchn); if (rc2 != 0) printk(KERN_WARNING "XENBUS: Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } static int xenbus_probe_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. 
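This zeroed page backs the shared xenstore ring for the local xenstored; its mfn is published through xen_start_info->store_mfn.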
*/ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); #ifdef CONFIG_XEN xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } xenbus_dev_init(); /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: if (page) free_page(page); /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifdef CONFIG_XEN postcore_initcall(xenbus_probe_init); MODULE_LICENSE("Dual BSD/GPL"); #else int xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? 
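A NULL drv means every frontend device is considered.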
*/ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-platform-pci/xenbus_xs.c000066400000000000000000000526761314037446600260200ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. 
We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). 
When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; /* Must stay in sync with the xsd_sockmsg_type values defined in xs_wire.h */ #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. 
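The string is passed as a one-element kvec, including its terminating NUL.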
*/ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). 
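Issues an XS_RM request for the joined dir/node path.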
*/ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) 
{ va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; BUG_ON(watch->flags & XBWF_new_thread); sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); /* Flush any currently-executing callback, unless we are it. :-) */ if (current->pid != xenwatch_pid) { mutex_lock(&xenwatch_mutex); mutex_unlock(&xenwatch_mutex); } } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. 
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/000077500000000000000000000000001314037446600217745ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/Kbuild000066400000000000000000000001151314037446600231260ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/Makefile000066400000000000000000000000661314037446600234360ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/blkfront.c000066400000000000000000000627511314037446600237740ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include /* * pv driver header files */ #include #include #include #include #include #include #include #include "block.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); //static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(void *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; /* FIXME: Use dynamic device id if this is not set. 
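 *
 * (For reference: the toolstack writes the device number into the frontend's
 * own xenstore directory.  Classic numbers encode major/minor directly, e.g.
 * 202 << 8 == 51712 for xvda, while devices that do not fit that encoding
 * are published via the "virtual-device-ext" node read below.)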
*/ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19) INIT_WORK(&info->work, blkif_restart_queue); #else INIT_WORK(&info->work, blkif_restart_queue, (void *)info); #endif for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
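 *
 * (setup_blkring() grants the shared ring page to the backend domain and
 * binds an event channel for notifications; the transaction below then
 * advertises ring-ref, event-channel and protocol in xenstore so that the
 * backend can map the ring and connect.)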
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); memset(info->sg, 0, sizeof(info->sg)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); #else err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info); #endif if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: bd = bdget(info->dev); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. 
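 *
 * (This can happen, for instance, when the virtual disk is resized while the
 * guest is running: the backend updates the "sectors" node and signals
 * Connected again, and re-reading the node here lets set_capacity() pick up
 * the new size without reconnecting the ring.)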
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); xlvbd_del(info); out: xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately.
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. */ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. 
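 *
 * (Each data segment is granted to the backend individually: a reference
 * claimed from gref_head is bound to the segment's machine frame so the
 * backend can map or copy that page, and the pfn is remembered in
 * shadow[id].frame[] so blkif_recover() can re-issue the grants after a
 * suspend/resume or migration.)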
*/ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)req->sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; #endif ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } #else for (i = 0; i < ring_req->nr_segments; ++i) { sg = info->sg + i; buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } #endif info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(request_queue_t *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } //static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs) static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int uptodate; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); uptodate = (bret->status == BLKIF_RSP_OKAY); switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); uptodate = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = end_that_request_first(req, uptodate, req->hard_nr_sectors); BUG_ON(ret); end_that_request_last(req, uptodate); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ?
GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/block.h000066400000000000000000000114351314037446600232430ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; dev_t dev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; request_queue_t *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (request_queue_t *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vbd/vbd.c000066400000000000000000000261411314037446600227170ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include /* * pv drivers header files */ #include "block.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major 202, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_START] != NULL) do_register = 0; break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { request_queue_t *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } static int xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { struct block_device *bd; int err = 0; int major, minor; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = 202; 
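	/*
	 * Extended virtual-device numbers all map onto major 202, the
	 * canonical Xen virtual block (xvd) major; BLKIF_MINOR_EXT() strips
	 * the EXTENDED flag bit defined above to recover the minor.
	 */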
minor = BLKIF_MINOR_EXT(vdevice); } info->dev = MKDEV(major, minor); bd = bdget(info->dev); if (bd == NULL) return -ENODEV; err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info, sector_size, info); bdput(bd); return err; } void xlvbd_del(struct blkfront_info *info) { if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/000077500000000000000000000000001314037446600221635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/Kbuild000066400000000000000000000001171314037446600233170ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o accel.o UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/Makefile000066400000000000000000000000661314037446600236250ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/accel.c000066400000000000000000000542521314037446600234060ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include /* * pv drivers header files */ #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. 
*/ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_disable(&vif_state->np->napi); #else netif_poll_disable(vif_state->np->netdev); #endif netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_enable(&vif_state->np->napi); #else netif_poll_enable(vif_state->np->netdev); #endif } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_disable(&vif_state->np->napi); #else netif_poll_disable(vif_state->np->netdev); #endif netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_enable(&vif_state->np->napi); #else netif_poll_enable(vif_state->np->netdev); #endif } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/netfront.c000066400000000000000000001742371314037446600242040ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * pv drivers header files */ #include #include #include #include #include #include #include struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_HW)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args)

static int setup_device(struct xenbus_device *, struct netfront_info *);
static struct net_device *create_netdev(struct xenbus_device *);

static void end_access(int, void *);
static void netif_disconnect_backend(struct netfront_info *);

static int network_connect(struct net_device *);
static void network_tx_buf_gc(struct net_device *);
static void network_alloc_rx_buffers(struct net_device *);
static void send_fake_arp(struct net_device *, int);

//static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
static irqreturn_t netif_int(int irq, void *dev_id);

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while(0)
#endif

static inline int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}

/**
 * Remove the emulated RTL8139 PCI NIC, which is superseded by the PV network device.
 */
#include 
#include 
static void remove_nic8139(void)
{
	struct pci_dev *pci_dev_nic8139 = NULL;

	//do {
	pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139);
	if (pci_dev_nic8139) {
		printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n",
		       pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device);
		pci_disable_device(pci_dev_nic8139);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
		pci_remove_bus_device(pci_dev_nic8139);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
		pci_stop_bus_device(pci_dev_nic8139);
		pci_remove_bus_device(pci_dev_nic8139);
#else
		pci_stop_and_remove_bus_device(pci_dev_nic8139);
#endif /* check kernel version */
	}
	//} while (pci_dev_nic8139); /* commented out because Fedora 12 will crash */

	return;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; // remove 8139 nic when xen nic devices appear remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); #else err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name, netdev); #endif if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @param arpType ARPOP_REQUEST or ARPOP_REPLY * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; skb = arp_create(arpType, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_enable(&np->napi); #endif spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif } } spin_unlock_bh(&np->rx_lock); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) netif_start_queue(dev); #else network_maybe_wake_tx(dev); #endif return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
	 */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (nr_flips = i = 0; ; i++) {
		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
							  np->xbdev->otherend_id,
							  pfn);
			np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remove this page before passing
				 * back to Xen. */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(np->rx_mcl+i,
							(unsigned long)vaddr,
							__pte(0), 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							pfn_to_mfn(pfn),
							0);
		}

		req->id = id;
		req->gref = ref;
	}

	if ( nr_flips != 0 ) {
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);

		set_xen_guest_handle(reservation.extent_start,
				     np->rx_pfn_array);
		reservation.nr_extents   = nr_flips;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* After all PTEs have been zapped, flush the TLB. */
			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
				UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			np->rx_mcl[i].args[1] = (unsigned long)&reservation;

			/* Zap PTEs and give away pages in one big
			 * multicall. */
			if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
				BUG();

			/* Check return status of HYPERVISOR_memory_op(). */
			if (unlikely(np->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
			while (nr_flips--)
				BUG_ON(np->rx_mcl[nr_flips].result);
		} else {
			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						 &reservation) != i)
				panic("Unable to reduce memory reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests.
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return 0; } frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } //static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_schedule(&np->napi); #else netif_rx_schedule(dev); #endif dev->last_rx = jiffies; } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = 
np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static int netif_poll(struct napi_struct *napi, int budget) #else static int netif_poll(struct net_device *dev, int *pbudget) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; #else struct netfront_info *np = netdev_priv(dev); #endif struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) int budget; #endif int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) if ((budget = *pbudget) > dev->quota) budget = dev->quota; #endif rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). 
*/ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) __skb_queue_purge(&errq); #endif while ((skb = __skb_dequeue(&errq))) kfree_skb(skb); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) dev->last_rx = jiffies; #endif } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) *pbudget -= work_done; dev->quota -= work_done; #endif if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) __napi_complete(napi); #else __netif_rx_complete(dev); #endif local_irq_restore(flags); } spin_unlock(&np->rx_lock); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) return work_done; #else return more_to_do | accel_more_to_do; #endif } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
*/ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } while ((skb = __skb_dequeue(&free_list)) != NULL) dev_kfree_skb(skb); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) napi_disable(&np->napi); #endif netif_stop_queue(np->netdev); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. 
*/ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
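* notify_remote_via_irq() below is that kick: it raises the event channel so the
* backend re-examines both rings, while network_tx_buf_gc() reclaims completed TX
* slots and network_alloc_rx_buffers() refills the RX ring before the locks are
* dropped.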
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_min(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_min_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) #else static ssize_t store_rxbuf_min(struct class_device *cd, const char *buf, size_t len) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct net_device *netdev = to_net_dev(dev); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); #endif struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_max(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_max_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) #else static ssize_t store_rxbuf_max(struct class_device *cd, const char *buf, size_t len) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct net_device *netdev = to_net_dev(dev); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); #endif struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > 
RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) #else static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf) #endif { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) struct netfront_info *info = netdev_priv(to_net_dev(dev)); #else struct net_device *netdev = container_of(cd, struct net_device, class_dev); struct netfront_info *info = netdev_priv(netdev); #endif return sprintf(buf, "%u\n", info->rx_target); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) static struct device_attribute xennet_attrs[] = { #else static const struct class_device_attribute xennet_attrs[] = { #endif __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) error = device_create_file(&netdev->dev, &xennet_attrs[i]); #else error = class_device_create_file(&netdev->class_dev, &xennet_attrs[i]); #endif if (error) goto fail; } return 0; fail: while (--i >= 0) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) device_remove_file(&netdev->dev, &xennet_attrs[i]); #else class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); #endif } return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) device_remove_file(&netdev->dev, &xennet_attrs[i]); #else class_device_remove_file(&netdev->class_dev, &xennet_attrs[i]); #endif } } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; #endif static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
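* A free tx_skbs[] slot holds the index of the next free slot cast to a pointer;
* such values are always below PAGE_OFFSET, which is how netif_release_tx_bufs()
* and the recovery code in network_connect() tell free-list links apart from real
* skb pointers. rx_skbs[] entries simply start out NULL with invalid grant refs.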
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) && \ LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->set_mac_address = xennet_set_mac_address; netdev->change_mtu = xennet_change_mtu; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) netif_napi_add(netdev, &np->napi, netif_poll, 64); #endif #endif #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) netdev->poll = netif_poll; netdev->weight = 64; #endif netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) SET_MODULE_OWNER(netdev); #endif SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int i = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { //send ARP_REQUEST packets first in case many switchboard refuse to //transmit ARPOP_REPLY packets without ARPOP_REQUEST received before. //then send ARPOP_REPLY packets for (i=0; i<3; i++) { send_fake_arp(dev, ARPOP_REQUEST); } for (i=0; i<3; i++) { send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
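* Both ring locks are held while the carrier is dropped so that any in-flight
* TX/RX processing drains first; the event-channel IRQ is then unbound and the
* ring grant references revoked, after which the backend can no longer touch the
* shared ring pages.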
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .owner = THIS_MODULE, .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/others/Ubuntu8/xen-vnif/netfront.h000066400000000000000000000211371314037446600241770ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include /* * pv drivers header files */ #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) struct napi_struct napi; #endif unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
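* Unlike the exported netfront_check_queue_ready() above, this internal variant
* is handed the netfront_info directly.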
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/000077500000000000000000000000001314037446600240115ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/Makefile000066400000000000000000000045761314037446600254650ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-06-4 : Create the file ###### ############################################################################### M=$(shell pwd) -include $(M)/config.mk COMPILEARGS = $(CROSSCOMPILE) reboot=$(OSVERSION)/reboot.c obj-m += xen-platform-pci/ obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ ifeq (2.6.32, $(OSVERSION)) ifeq (x86_64, $(KERNELARCH)) reboot=reboot_64.c endif endif ifeq (3.0.58, $(OSVERSION)) OSVERSION=3.0.13 endif ifeq (3.0.80, $(OSVERSION)) OSVERSION=3.0.13 endif ###vni_front for VSA ifeq ("2.6.32.59-0.7-default", "$(BUILDKERNEL)") obj-m += vni_front/ endif ifeq ("3.0.13-0.27-default", "$(BUILDKERNEL)") obj-m += vni_front/ endif ifeq ("3.0.13-0.27-pae", "$(BUILDKERNEL)") obj-m += vni_front/ endif ifeq (3.0.76, $(OSVERSION)) obj-m += vni_front/ endif all:lnfile rename_headers make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install:lnfile rename_headers make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) mkinitrd lnfile: @set -e; \ for dir in $(obj-m); \ do \ cd $$dir; \ for file in `ls $(OSVERSION)`; \ do \ ln -sf $(OSVERSION)/$$file $$file; \ done; \ cd -; \ done; \ cd xen-platform-pci/; \ ln -sf $(reboot) reboot.c; \ cd .. 
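# The lnfile target above symlinks the kernel-version-specific sources (selected
# via $(OSVERSION) from config.mk) into each driver directory before the
# out-of-tree module build runs.
# Hypothetical invocation -- KERNDIR and BUILDKERNEL are expected to be supplied
# by the caller; the values shown here are examples only:
#   make all KERNDIR=/lib/modules/$(uname -r)/build BUILDKERNEL=$(uname -r)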
rename_headers: @if [ -d "/lib/modules/$(shell uname -r)/source/include/linux/" ]; then \ cd /lib/modules/$(shell uname -r)/source/include/linux/; \ elif [ -d "/lib/modules/$(shell uname -r)/build/include/linux/" ]; then \ cd /lib/modules/$(shell uname -r)/build/include/linux/; \ else \ exit 0; \ fi; \ rename virtio VIRTIO virtio* 1>/dev/null 2>&1; \ cd - clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) @find ./ -name modules.order | xargs rm -f @rm -rf Module.symvers @cd $$M @find ./ -type l | xargs rm -f @if [ -d "/lib/modules/$(shell uname -r)/source/include/linux/" ]; then \ cd /lib/modules/$(shell uname -r)/source/include/linux/; \ elif [ -d "/lib/modules/$(shell uname -r)/build/include/linux/" ]; then \ cd /lib/modules/$(shell uname -r)/build/include/linux/; \ else \ exit 0; \ fi; \ rename VIRTIO virtio VIRTIO* 1>/dev/null 2>&1; \ cd - UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/000077500000000000000000000000001314037446600267155ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/asm-generic/000077500000000000000000000000001314037446600311075ustar00rootroot00000000000000pgtable-nopmd.h000066400000000000000000000005611314037446600337340ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/asm-generic#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ pgtable-nopud.h000066400000000000000000000006211314037446600337410ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/asm-generic#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/linux/000077500000000000000000000000001314037446600300545ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/linux/io.h000066400000000000000000000002771314037446600306420ustar00rootroot00000000000000#ifndef _LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/linux/mutex.h000066400000000000000000000015411314037446600313700ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This file is released under the GPLv2. 
*/ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ scatterlist.h000066400000000000000000000003761314037446600325150ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/linux#ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) #error "This version of Linux should not need compat linux/scatterlist.h" #endif #include #endif /* _LINUX_SCATTERLIST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/xen/000077500000000000000000000000001314037446600275075ustar00rootroot00000000000000platform-compat.h000066400000000000000000000135461314037446600327170ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/compat-include/xen#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #if defined(_LINUX_INIT_H) && !defined(__init) #define __init #endif #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
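* The typedef in question is gfp_t: whenever one of the core headers that may
* carry the backport has been included, the block below forces gfp_t to plain
* unsigned so the compat code builds identically on old and patched kernels.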
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
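* Hence the duplicated branches below: one for kernels that do not define
* SLE_VERSION at all and one for SUSE kernels older than SLE 10 SP1, both mapping
* netif_tx_lock_bh()/netif_tx_unlock_bh() onto the old per-device xmit_lock.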
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) #if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) #define setup_xen_features xen_setup_features #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #ifdef sync_test_bit #define synch_change_bit sync_change_bit #define synch_clear_bit sync_clear_bit #define synch_set_bit sync_set_bit #define synch_test_and_change_bit sync_test_and_change_bit #define synch_test_and_clear_bit sync_test_and_clear_bit #define synch_test_and_set_bit sync_test_and_set_bit #define synch_test_bit sync_test_bit #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/config.mk000066400000000000000000000017521314037446600256140ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H KERNELARCH= $(shell uname -p) OSVERSION = $(shell echo $(BUILDKERNEL) | awk -F"." '{print $$1 "." $$2 "." 
$$3}' | awk -F"-" '{print $$1}') VRMDIST = $(shell echo $(BUILDKERNEL) | grep "2.6.32.59-0.3-default\|2.6.32.59-0.7-default") ifneq ($(VRMDIST),) _XEN_CPPFLAGS += -DVRM endif ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif ifeq (3.0.76, $(OSVERSION)) _XEN_CPPFLAGS += -DSUSE_1103 endif EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/000077500000000000000000000000001314037446600254345ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm-i386/000077500000000000000000000000001314037446600267035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm-i386/README000066400000000000000000000000001314037446600275510ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/000077500000000000000000000000001314037446600262145ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/gnttab_dma.h000066400000000000000000000026471314037446600304760ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page) { __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/hypercall.h000066400000000000000000000251021314037446600303500ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif /* if hypercall memory_op function return failed, need retry */ #define HYPERCALL_X86_RETRY 128 #define HYPERCALL_X86_ERROR 11 #if CONFIG_XEN_COMPAT <= 0x030002 # include /* memcpy() */ #endif #ifdef CONFIG_XEN #define HYPERCALL_ASM_OPERAND "%c" #define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) #define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #else #define HYPERCALL_ASM_OPERAND "*%" #define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) #define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #endif #define HYPERCALL_ARG(arg, n) \ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "1" \ : "=a" (__res) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, arg) \ ({ \ type __res; \ HYPERCALL_ARG(arg, 1); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "2" \ : "=a" (__res), "+r" (__arg1) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "3" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "4" \ : "=a" (__res), "+r" (__arg1), \ "+r" (__arg2), "+r" (__arg3) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "5" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4) \ : HYPERCALL_C_OPERAND(name) 
\ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall(type, op, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call *%6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : "g" (HYPERCALL_LOCATION(op)) \ : "memory" ); \ __res; \ }) #ifdef CONFIG_X86_32 # include "hypercall_32.h" #else # include "hypercall_64.h" #endif static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmu_update(req, count, success_count, domid); return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmuext_op(op, count, success_count, domid); return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } #endif static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { int rc = 0; unsigned int count = 0; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); /* when hypercall memory_op failed,retry */ do{ rc = _hypercall2(int, memory_op, cmd, arg); if (unlikely(rc == -HYPERCALL_X86_ERROR)){ printk(KERN_WARNING "HYPERVISOR_memory_op: Calling HYPERVISOR_memory need retry \n"); } count++; }while((rc == -HYPERCALL_X86_ERROR) && ( count < HYPERCALL_X86_RETRY) ); return rc; } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check 
HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { bool fixup = false; int rc; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(false); #ifdef GNTTABOP_map_grant_ref if (cmd == GNTTABOP_map_grant_ref) #endif fixup = gnttab_pre_map_adjust(cmd, uop, count); rc = _hypercall3(int, grant_table_op, cmd, uop, count); if (rc == 0 && fixup) rc = gnttab_post_map_adjust(uop, count); return rc; } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/hypercall_32.h000066400000000000000000000031401314037446600306520ustar00rootroot00000000000000#define HYPERCALL_arg1 "ebx" #define HYPERCALL_arg2 "ecx" #define HYPERCALL_arg3 "edx" #define HYPERCALL_arg4 "esi" #define HYPERCALL_arg5 "edi" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } #endif static inline long __must_check 
HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall2(long, set_timer_op, (unsigned long)timeout, (unsigned long)(timeout>>32)); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, (unsigned long)ma, (unsigned long)(ma>>32), (unsigned long)desc, (unsigned long)(desc>>32)); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/hypercall_64.h000066400000000000000000000025661314037446600306720ustar00rootroot00000000000000#define HYPERCALL_arg1 "rdi" #define HYPERCALL_arg2 "rsi" #define HYPERCALL_arg3 "rdx" #define HYPERCALL_arg4 "r10" #define HYPERCALL_arg5 "r8" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/hypervisor.h000066400000000000000000000234361314037446600306070ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include #include #include #include extern shared_info_t *HYPERVISOR_shared_info; #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) #ifdef CONFIG_SMP #define current_vcpu_info() vcpu_info(smp_processor_id()) #else #define current_vcpu_info() vcpu_info(0) #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif #define init_hypervisor(c) ((void)((c)->x86_hyper_vendor = X86_HYPER_VENDOR_XEN)) /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. */ void xen_pt_switch(pgd_t *); void xen_new_user_pt(pgd_t *); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(pgd_t *); void xen_pgd_unpin(pgd_t *); void xen_init_pgd_pin(void); void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(cpumask_t *mask); void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr); #endif /* Returns zero on success else negative errno. 
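* This applies to xen_create_contiguous_region() and xen_limit_pages_to_max_mfn()
* declared below; xen_destroy_contiguous_region() has no return value.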
*/ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void scrub_pages(void *, unsigned int); void xen_scrub_pages(void *, unsigned int); #else #define scrub_pages(_p,_n) ((void)0) #define xen_scrub_pages(_p,_n) ((void)0) #endif #ifdef CONFIG_XEN DECLARE_PER_CPU(bool, xen_lazy_mmu); int xen_multicall_flush(bool); int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, unsigned long flags); int __must_check xen_multi_mmu_update(mmu_update_t *, unsigned int count, unsigned int *success_count, domid_t); int __must_check xen_multi_mmuext_op(struct mmuext_op *, unsigned int count, unsigned int *success_count, domid_t); #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = true; } static inline void arch_leave_lazy_mmu_mode(void) { __get_cpu_var(xen_lazy_mmu) = false; xen_multicall_flush(false); } #if defined(CONFIG_X86_32) #define arch_use_lazy_mmu_mode() unlikely(x86_read_percpu(xen_lazy_mmu)) #elif !defined(arch_use_lazy_mmu_mode) #define arch_use_lazy_mmu_mode() unlikely(__get_cpu_var(xen_lazy_mmu)) #endif struct gnttab_map_grant_ref; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *, unsigned int count); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *, unsigned int); #else static inline int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *m, unsigned int count) { BUG(); return -ENOSYS; } #endif #else /* CONFIG_XEN */ static inline void xen_multicall_flush(bool ignore) {} #define arch_use_lazy_mmu_mode() false #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmuext_op(...) ({ BUG(); -ENOSYS; }) #define gnttab_pre_map_adjust(...) false #define gnttab_post_map_adjust(...) ({ BUG(); -ENOSYS; }) #endif /* CONFIG_XEN */ #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 extern char hypercall_page[PAGE_SIZE]; #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif #include static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void __noreturn HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. 
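* Reaching this point means the shutdown hypercall returned. For a crash request
* we must not BUG() again (that would recurse into another crash shutdown), so
* the BUG_ON below only fires for other reasons and the for(;;) loop parks the
* CPU.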
*/ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int __must_check HYPERVISOR_poll_no_timeout( evtchn_port_t *ports, unsigned int nr_ports) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)req; mcl->args[1] = count; mcl->args[2] = (unsigned long)success_count; mcl->args[3] = domid; } static inline void MULTI_memory_op(multicall_entry_t *mcl, unsigned int cmd, void *arg) { mcl->op = __HYPERVISOR_memory_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)arg; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ static inline void MULTI_bug(multicall_entry_t *mcl, ...) { BUG_ON(mcl); } #define MULTI_update_va_mapping(a,b,c,d) ((void)0) #define MULTI_mmu_update(a,b,c,d,e) ((void)0) #define MULTI_memory_op(a,b,c) ((void)0) #define MULTI_grant_table_op(a,b,c,d) ((void)0) #endif #ifdef LINUX /* drivers/staging/rt28?0/ use Windows-style types, including VOID */ #undef VOID #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/maddr.h000066400000000000000000000001201314037446600274450ustar00rootroot00000000000000#ifdef CONFIG_X86_32 # include "maddr_32.h" #else # include "maddr_64.h" #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/maddr_32.h000066400000000000000000000126221314037446600277630ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<31) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movl %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movl %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 4\n" " .long 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else #define pte_phys_to_machine phys_to_machine #define pte_machine_to_phys machine_to_phys #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/maddr_64.h000066400000000000000000000115021314037446600277640ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL<<63) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. 
*/ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: movq %2,%0\n" " jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" " .align 8\n" " .quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
*/ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/asm/synch_bitops.h000066400000000000000000000062311314037446600310730ustar00rootroot00000000000000#ifndef __XEN_SYNCH_BITOPS_H__ #define __XEN_SYNCH_BITOPS_H__ /* * Copyright 1992, Linus Torvalds. * Heavily modified to provide guaranteed strong synchronisation * when communicating with Xen or other guest OSes running on other CPUs. 
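 *
 * A minimal usage sketch, purely illustrative ("flags" and MY_FLAG_BIT are
 * hypothetical names, not defined in this header). The point is that the
 * set / test-and-clear pair stays atomic with respect to the remote end:
 *
 *	if (!synch_test_and_set_bit(MY_FLAG_BIT, flags))
 *		notify_other_end();	(hypothetical helper)
 *
 *	if (synch_test_and_clear_bit(MY_FLAG_BIT, flags))
 *		handle_request();	(hypothetical helper)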
*/ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define ADDR (*(volatile long *) addr) static __inline__ void synch_set_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btsl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_clear_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btrl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ void synch_change_bit(int nr, volatile void * addr) { __asm__ __volatile__ ( "lock btcl %1,%0" : "+m" (ADDR) : "Ir" (nr) : "memory" ); } static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btsl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btrl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr) { int oldbit; __asm__ __volatile__ ( "lock btcl %2,%1\n\tsbbl %0,%0" : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory"); return oldbit; } struct __synch_xchg_dummy { unsigned long a[100]; }; #define __synch_xg(x) ((struct __synch_xchg_dummy *)(x)) #define synch_cmpxchg(ptr, old, new) \ ((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\ (unsigned long)(old), \ (unsigned long)(new), \ sizeof(*(ptr)))) static inline unsigned long __synch_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: __asm__ __volatile__("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 2: __asm__ __volatile__("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #ifdef CONFIG_X86_64 case 4: __asm__ __volatile__("lock; cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; case 8: __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #else case 4: __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__synch_xg(ptr)), "0"(old) : "memory"); return prev; #endif } return old; } #define synch_test_bit test_bit #define synch_cmpxchg_subword synch_cmpxchg #endif /* __XEN_SYNCH_BITOPS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/000077500000000000000000000000001314037446600265735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/compat_virtionet.h000066400000000000000000000052411314037446600323340ustar00rootroot00000000000000#ifndef __COMPAT_VIRTIONET_H__ #define __COMPAT_VIRTIONET_H__ #include #ifndef netif_set_real_num_tx_queues #define netif_set_real_num_tx_queues(dev, max_queue_pairs) #endif #ifndef netif_set_real_num_rx_queues #define netif_set_real_num_rx_queues(dev, max_queue_pairs) #endif #ifndef this_cpu_ptr #define this_cpu_ptr(field) \ per_cpu_ptr(field, smp_processor_id()) #endif #ifndef net_ratelimited_function #define net_ratelimited_function(function, ...) \ do { \ if (net_ratelimit()) \ function(__VA_ARGS__); \ } while (0) #endif /* net_ratelimited_function */ #ifndef pr_warn #define pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) #endif /* pr_warn */ #ifndef net_warn_ratelimited #define net_warn_ratelimited(fmt, ...) 
\ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #endif /* net_warn_ratelimited */ #ifndef net_dbg_ratelimited #define net_dbg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #endif /* net_dbg_ratelimited */ #ifndef eth_hw_addr_random #define eth_hw_addr_random(dev) \ random_ether_addr(dev->dev_addr) #endif /* eth_hw_addr_random */ #ifndef netdev_for_each_uc_addr #define netdev_for_each_uc_addr(ha, dev) \ list_for_each_entry(ha, &dev->uc.list, list) #endif /* netdev_for_each_uc_addr */ #ifndef netdev_mc_count #define netdev_mc_count(dev) \ dev->mc_count #endif /* netdev_mc_count */ #ifndef netdev_uc_count #define netdev_uc_count(dev) \ dev->uc.count #endif /* netdev_uc_count */ #ifndef __percpu #define __percpu #endif /* __percpu */ #ifndef skb_checksum_start_offset #define skb_checksum_start_offset(skb) \ skb->csum_start - skb_headroom(skb) #endif /* skb_checksum_start_offset */ /** * module_driver() - Helper macro for drivers that don't do anything * special in module init/exit. This eliminates a lot of boilerplate. * Each module may only use this macro once, and calling it replaces * module_init() and module_exit(). * * @__driver: driver name * @__register: register function for this driver type * @__unregister: unregister function for this driver type * @...: Additional arguments to be passed to __register and __unregister. * * Use this macro to construct bus specific macros for registering * drivers, and do not use it on its own. */ #ifndef module_driver #define module_driver(__driver, __register, __unregister, ...) \ static int __init __driver##_init(void) \ { \ return __register(&(__driver) , ##__VA_ARGS__); \ } \ module_init(__driver##_init); \ static void __exit __driver##_exit(void) \ { \ __unregister(&(__driver) , ##__VA_ARGS__); \ } \ module_exit(__driver##_exit); #endif /* module_driver */ #endif /* __COMPAT_VIRTIONET_H__ */UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/u64_stats_sync.h000066400000000000000000000102541314037446600316360ustar00rootroot00000000000000#ifndef _LINUX_U64_STATS_SYNC_H #define _LINUX_U64_STATS_SYNC_H /* * To properly implement 64bits network statistics on 32bit and 64bit hosts, * we provide a synchronization point, that is a noop on 64bit or UP kernels. * * Key points : * 1) Use a seqcount on SMP 32bits, with low overhead. * 2) Whole thing is a noop on 64bit arches or UP kernels. * 3) Write side must ensure mutual exclusion or one seqcount update could * be lost, thus blocking readers forever. * If this synchronization point is not a mutex, but a spinlock or * spinlock_bh() or disable_bh() : * 3.1) Write side should not sleep. * 3.2) Write side should not allow preemption. * 3.3) If applicable, interrupts should be disabled. * * 4) If reader fetches several counters, there is no guarantee the whole values * are consistent (remember point 1) : this is a noop on 64bit arches anyway) * * 5) readers are allowed to sleep or be preempted/interrupted : They perform * pure reads. But if they have to fetch many values, it's better to not allow * preemptions/interruptions to avoid many retries. * * 6) If counter might be written by an interrupt, readers should block interrupts. 
* (On UP, there is no seqcount_t protection, a reader allowing interrupts could * read partial values) * * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and * u64_stats_fetch_retry_bh() helpers * * Usage : * * Stats producer (writer) should use following template granted it already got * an exclusive access to counters (a lock is already taken, or per cpu * data is used [in a non preemptable context]) * * spin_lock_bh(...) or other synchronization to get exclusive access * ... * u64_stats_update_begin(&stats->syncp); * stats->bytes64 += len; // non atomic operation * stats->packets64++; // non atomic operation * u64_stats_update_end(&stats->syncp); * * While a consumer (reader) should use following template to get consistent * snapshot for each variable (but no guarantee on several ones) * * u64 tbytes, tpackets; * unsigned int start; * * do { * start = u64_stats_fetch_begin(&stats->syncp); * tbytes = stats->bytes64; // non atomic operation * tpackets = stats->packets64; // non atomic operation * } while (u64_stats_fetch_retry(&stats->syncp, start)); * * * Example of use in drivers/net/loopback.c, using per_cpu containers, * in BH disabled context. */ #include struct u64_stats_sync { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t seq; #endif }; static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_begin(&syncp->seq); #endif } static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); #endif } static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 preempt_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 preempt_enable(); #endif return false; #endif } /* * In case softirq handlers can update u64 counters, readers can use following helpers * - SMP 32bit arches use seqcount protection, irq safe. * - UP 32bit must disable BH. * - 64bit have no problem atomically reading u64 values, irq safe. */ static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 local_bh_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 local_bh_enable(); #endif return false; #endif } #endif /* _LINUX_U64_STATS_SYNC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio.h000066400000000000000000000120701314037446600302600ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_H #define _LINUX_VIRTIO_H /* Everything a virtio driver needs to work with any particular virtio * implementation. */ #include #include #include #include #include #include //#include /** * virtqueue - a queue to register buffers for sending or receiving. * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). 
* @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @priv: a pointer for the virtqueue implementation to use. * @index: the zero-based ordinal number for this queue. * @num_free: number of elements we expect to be able to fit. * * A note on @num_free: with indirect buffers, each buffer needs one * element in the queue, otherwise a buffer will need one element per * sg element. */ struct virtqueue { struct list_head list; void (*callback)(struct virtqueue *vq); const char *name; struct virtio_device *vdev; unsigned int index; unsigned int num_free; void *priv; }; int virtqueue_add_buf(struct virtqueue *vq, struct scatterlist sg[], unsigned int out_num, unsigned int in_num, void *data, gfp_t gfp); int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp); void virtqueue_kick(struct virtqueue *vq); bool virtqueue_kick_prepare(struct virtqueue *vq); void virtqueue_notify(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); bool virtqueue_poll(struct virtqueue *vq, unsigned); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. * @vringh_config: configuration ops for host vrings. * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. */ struct virtio_device { int index; struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; /* Note that this is a Linux set_bit-style bitmap. */ unsigned long features[1]; void *priv; }; static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); } int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); /** * virtio_driver - operations for a virtio I/O driver * @driver: underlying device driver (populate name and owner). * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. 
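 *
 * A minimal registration sketch, purely illustrative ("my_ids", "my_features",
 * "my_probe" and "my_remove" are hypothetical driver symbols, not part of
 * this header); register_virtio_driver()/module_virtio_driver() below do the
 * actual bus registration:
 *
 *	static struct virtio_driver my_driver = {
 *		.driver.name	= KBUILD_MODNAME,
 *		.driver.owner	= THIS_MODULE,
 *		.id_table	= my_ids,
 *		.feature_table	= my_features,
 *		.feature_table_size = ARRAY_SIZE(my_features),
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_virtio_driver(my_driver);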
*/ struct virtio_driver { struct device_driver driver; const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); void (*config_changed)(struct virtio_device *dev); #ifdef CONFIG_PM int (*freeze)(struct virtio_device *dev); int (*restore)(struct virtio_device *dev); #endif }; static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv) { return container_of(drv, struct virtio_driver, driver); } int register_virtio_driver(struct virtio_driver *drv); void unregister_virtio_driver(struct virtio_driver *drv); /* module_virtio_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate. Each module may only use this macro once, and * calling it replaces module_init() and module_exit() */ #define module_virtio_driver(__virtio_driver) \ module_driver(__virtio_driver, register_virtio_driver, \ unregister_virtio_driver) #endif /* _LINUX_VIRTIO_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio_config.h000066400000000000000000000127611314037446600316140ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_CONFIG_H #define _LINUX_VIRTIO_CONFIG_H #include #include #include #include #include /** * virtio_config_ops - operations for configuring a virtio device * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field * buf: the buffer to write the field value into. * len: the length of the buffer * @set: write the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field * buf: the buffer to read the field value from. * len: the length of the buffer * @get_status: read the status byte * vdev: the virtio_device * Returns the status byte * @set_status: write the status byte * vdev: the virtio_device * status: the new status byte * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again * Device must not be reset from its vq/config callbacks, or in * parallel with being added/removed. * @find_vqs: find virtqueues and instantiate them. * vdev: the virtio_device * nvqs: the number of virtqueues to find * vqs: on success, includes new virtqueues * callbacks: array of callbacks, for each virtqueue * include a NULL entry for vqs that do not need a callback * names: array of virtqueue names (mainly for debugging) * include a NULL entry for vqs unused by driver * Returns 0 on success or error status * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. * @bus_name: return the bus name associated with the device * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. 
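 *
 * A minimal find_vqs sketch, purely illustrative ("rx_done" and "tx_done"
 * are hypothetical callbacks; error handling is omitted). It fills the
 * callbacks/names arrays described above and asks the transport for two
 * virtqueues:
 *
 *	vq_callback_t *callbacks[] = { rx_done, tx_done };
 *	const char *names[] = { "rx", "tx" };
 *	struct virtqueue *vqs[2];
 *	int err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);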
*/ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len); u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]); void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); }; /* If driver didn't advertise the feature, it will never appear. */ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit); /** * virtio_has_feature - helper to determine if this device has this feature. * @vdev: the device * @fbit: the feature bit */ static inline bool virtio_has_feature(const struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ if (__builtin_constant_p(fbit)) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) BUILD_BUG_ON(fbit >= 32); #else MAYBE_BUILD_BUG_ON(fbit >= 32); #endif else BUG_ON(fbit >= 32); if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); return test_bit(fbit, vdev->features); } /** * virtio_config_val - look for a feature and get a virtio config entry. * @vdev: the virtio device * @fbit: the feature bit * @offset: the type to search for. * @v: a pointer to the value to fill in. * * The return value is -ENOENT if the feature doesn't exist. Otherwise * the config value is copied into whatever is pointed to by v. */ #define virtio_config_val(vdev, fbit, offset, v) \ virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v)) #define virtio_config_val_len(vdev, fbit, offset, v, len) \ virtio_config_buf((vdev), (fbit), (offset), (v), (len)) static inline int virtio_config_buf(struct virtio_device *vdev, unsigned int fbit, unsigned int offset, void *buf, unsigned len) { if (!virtio_has_feature(vdev, fbit)) return -ENOENT; vdev->config->get(vdev, offset, buf, len); return 0; } static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *c, const char *n) { vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); if (err < 0) return ERR_PTR(err); return vq; } static inline const char *virtio_bus_name(struct virtio_device *vdev) { if (!vdev->config->bus_name) return "virtio"; return vdev->config->bus_name(vdev); } /** * virtqueue_set_affinity - setting affinity for a virtqueue * @vq: the virtqueue * @cpu: the cpu no. * * Pay attention the function are best-effort: the affinity hint may not be set * due to config support, irq type and sharing. 
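 *
 * A minimal sketch of spreading queues across CPUs, purely illustrative
 * ("vi->vqs" and "nvqs" are hypothetical driver-private names); because the
 * hint is best-effort, the return value may simply be ignored:
 *
 *	for (i = 0; i < nvqs; i++)
 *		virtqueue_set_affinity(vi->vqs[i], i % num_online_cpus());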
* */ static inline int virtqueue_set_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; if (vdev->config->set_vq_affinity) return vdev->config->set_vq_affinity(vq, cpu); return 0; } #endif /* _LINUX_VIRTIO_CONFIG_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio_ids.h000066400000000000000000000042401314037446600311170ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_IDS_H #define _LINUX_VIRTIO_IDS_H /* * Virtio IDs * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define VIRTIO_ID_NET 1 /* virtio net */ #define VIRTIO_ID_BLOCK 2 /* virtio block */ #define VIRTIO_ID_CONSOLE 3 /* virtio console */ #define VIRTIO_ID_RNG 4 /* virtio rng */ #define VIRTIO_ID_BALLOON 5 /* virtio balloon */ #define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */ #define VIRTIO_ID_SCSI 8 /* virtio scsi */ #define VIRTIO_ID_9P 9 /* 9p virtio console */ #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ #define VIRTIO_ID_CAIF 12 /* Virtio caif */ #endif /* _LINUX_VIRTIO_IDS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio_net.h000066400000000000000000000204651314037446600311350ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ #define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ #define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ #define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ #define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ #define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ #define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ #define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ #define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the * network */ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ #define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ __u8 mac[6]; /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ __u16 status; /* Maximum number of each of transmit and receive queues; * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ. * Legal values are between 1 and 0x8000 */ __u16 max_virtqueue_pairs; } __attribute__((packed)); /* This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
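 *
 * A minimal sketch of filling the header for a partially-checksummed,
 * non-GSO packet, purely illustrative ("hdr", "l4_offset" and
 * "csum_field_off" are hypothetical names); the field meanings follow the
 * struct comments below:
 *
 *	hdr->flags	 = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 *	hdr->gso_type	 = VIRTIO_NET_HDR_GSO_NONE;
 *	hdr->csum_start	 = l4_offset;		(first byte to checksum)
 *	hdr->csum_offset = csum_field_off;	(where, after csum_start, the
 *						 computed checksum is stored)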
*/ struct virtio_net_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset #define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid __u8 flags; #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame #define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) #define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO) #define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set __u8 gso_type; __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ __u16 gso_size; /* Bytes to append to hdr_len per frame */ __u16 csum_start; /* Position to start checksumming from */ __u16 csum_offset; /* Offset after that to place checksum */ }; /* This is the version of the header to use when the MRG_RXBUF * feature has been negotiated. */ struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; __u16 num_buffers; /* Number of merged rx buffers */ }; /* * Control virtqueue data structures * * The control virtqueue expects a header in the first sg entry * and an ack/status response in the last entry. Data for the * command goes in between. */ struct virtio_net_ctrl_hdr { __u8 class; __u8 cmd; } __attribute__((packed)); typedef __u8 virtio_net_ctrl_ack; #define VIRTIO_NET_OK 0 #define VIRTIO_NET_ERR 1 /* * Control the RX mode, ie. promisucous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ #define VIRTIO_NET_CTRL_RX 0 #define VIRTIO_NET_CTRL_RX_PROMISC 0 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 #define VIRTIO_NET_CTRL_RX_ALLUNI 2 #define VIRTIO_NET_CTRL_RX_NOMULTI 3 #define VIRTIO_NET_CTRL_RX_NOUNI 4 #define VIRTIO_NET_CTRL_RX_NOBCAST 5 /* * Control the MAC * * The MAC filter table is managed by the hypervisor, the guest should * assume the size is infinite. Filtering should be considered * non-perfect, ie. based on hypervisor resources, the guest may * received packets from sources not specified in the filter list. * * In addition to the class/cmd header, the TABLE_SET command requires * two out scatterlists. Each contains a 4 byte count of entries followed * by a concatenated byte stream of the ETH_ALEN MAC addresses. The * first sg list contains unicast addresses, the second is for multicast. * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature * is available. * * The ADDR_SET command requests one out scatterlist, it contains a * 6 bytes MAC address. This functionality is present if the * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available. */ struct virtio_net_ctrl_mac { __u32 entries; __u8 macs[][ETH_ALEN]; } __attribute__((packed)); #define VIRTIO_NET_CTRL_MAC 1 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1 /* * Control VLAN filtering * * The VLAN filter table is controlled via a simple ADD/DEL interface. * VLAN IDs not added may be filterd by the hypervisor. Del is the * opposite of add. Both commands expect an out entry containing a 2 * byte VLAN ID. VLAN filterting is available with the * VIRTIO_NET_F_CTRL_VLAN feature bit. */ #define VIRTIO_NET_CTRL_VLAN 2 #define VIRTIO_NET_CTRL_VLAN_ADD 0 #define VIRTIO_NET_CTRL_VLAN_DEL 1 /* * Control link announce acknowledgement * * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that * driver has recevied the notification; device would clear the * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives * this command. 
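 *
 * A minimal sketch of issuing the acknowledgement on the control virtqueue,
 * purely illustrative ("cvq" is a hypothetical control virtqueue; completion
 * polling via virtqueue_get_buf() and error handling are omitted). Per the
 * control-virtqueue layout above, the header goes in the first sg entry and
 * the status byte in the last:
 *
 *	struct virtio_net_ctrl_hdr ctrl = {
 *		.class = VIRTIO_NET_CTRL_ANNOUNCE,
 *		.cmd   = VIRTIO_NET_CTRL_ANNOUNCE_ACK,
 *	};
 *	virtio_net_ctrl_ack status = ~0;
 *	struct scatterlist hdr_sg, stat_sg, *sgs[] = { &hdr_sg, &stat_sg };
 *
 *	sg_init_one(&hdr_sg, &ctrl, sizeof(ctrl));
 *	sg_init_one(&stat_sg, &status, sizeof(status));
 *	virtqueue_add_sgs(cvq, sgs, 1, 1, &ctrl, GFP_ATOMIC);
 *	virtqueue_kick(cvq);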
*/ #define VIRTIO_NET_CTRL_ANNOUNCE 3 #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0 /* * Control Receive Flow Steering * * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET * enables Receive Flow Steering, specifying the number of the transmit and * receive queues that will be used. After the command is consumed and acked by * the device, the device will not steer new packets on receive virtqueues * other than specified nor read from transmit virtqueues other than specified. * Accordingly, driver should not transmit new packets on virtqueues other than * specified. */ struct virtio_net_ctrl_mq { __u16 virtqueue_pairs; }; #define VIRTIO_NET_CTRL_MQ 4 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 #endif /* _LINUX_VIRTIO_NET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio_pci.h000066400000000000000000000072341314037446600311210ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _LINUX_VIRTIO_PCI_H #define _LINUX_VIRTIO_PCI_H #include /* A 32-bit r/o bitmask of the features supported by the host */ #define VIRTIO_PCI_HOST_FEATURES 0 /* A 32-bit r/w bitmask of features activated by the guest */ #define VIRTIO_PCI_GUEST_FEATURES 4 /* A 32-bit r/w PFN for the currently selected queue */ #define VIRTIO_PCI_QUEUE_PFN 8 /* A 16-bit r/o queue size for the currently selected queue */ #define VIRTIO_PCI_QUEUE_NUM 12 /* A 16-bit r/w queue selector */ #define VIRTIO_PCI_QUEUE_SEL 14 /* A 16-bit r/w queue notifier */ #define VIRTIO_PCI_QUEUE_NOTIFY 16 /* An 8-bit device status register. */ #define VIRTIO_PCI_STATUS 18 /* An 8-bit r/o interrupt status register. Reading the value will return the * current contents of the ISR and will also clear it. 
This is effectively * a read-and-acknowledge. */ #define VIRTIO_PCI_ISR 19 /* The bit of the ISR which indicates a device configuration change. */ #define VIRTIO_PCI_ISR_CONFIG 0x2 /* MSI-X registers: only enabled if MSI-X is enabled. */ /* A 16-bit vector for configuration changes. */ #define VIRTIO_MSI_CONFIG_VECTOR 20 /* A 16-bit vector for selected queue notifications. */ #define VIRTIO_MSI_QUEUE_VECTOR 22 /* Vector value used to disable MSI for queue */ #define VIRTIO_MSI_NO_VECTOR 0xffff /* The remaining space is defined by each driver as the per-driver * configuration space */ #define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 /* How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 /* The alignment to use between consumer and producer parts of vring. * x86 pagesize again. */ #define VIRTIO_PCI_VRING_ALIGN 4096 #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/linux/virtio_ring.h000066400000000000000000000035421314037446600313030ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_RING_H #define _LINUX_VIRTIO_RING_H #include #include /* * Barriers in virtio are tricky. Non-SMP virtio guests can't assume * they're not on an SMP host system, so they need to assume real * barriers. Non-SMP virtio hosts could skip the barriers, but does * anyone care? * * For virtio_pci on SMP, we don't need to order with respect to MMIO * accesses through relaxed memory I/O windows, so smp_mb() et al are * sufficient. * * For using virtio to talk to real devices (eg. other heterogeneous * CPUs) we do need real barriers. In theory, we could be using both * kinds of virtio, so it's a runtime decision, and the branch is * actually quite cheap. */ #ifdef CONFIG_SMP static inline void virtio_mb(bool weak_barriers) { if (weak_barriers) smp_mb(); else mb(); } static inline void virtio_rmb(bool weak_barriers) { if (weak_barriers) smp_rmb(); else rmb(); } static inline void virtio_wmb(bool weak_barriers) { if (weak_barriers) smp_wmb(); else wmb(); } #else static inline void virtio_mb(bool weak_barriers) { mb(); } static inline void virtio_rmb(bool weak_barriers) { rmb(); } static inline void virtio_wmb(bool weak_barriers) { wmb(); } #endif struct virtio_device; struct virtqueue; struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name); void vring_del_virtqueue(struct virtqueue *vq); /* Filter out transport-specific feature bits. 
*/ void vring_transport_features(struct virtio_device *vdev); irqreturn_t vring_interrupt(int irq, void *_vq); #endif /* _LINUX_VIRTIO_RING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/uapi/000077500000000000000000000000001314037446600263725ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/uapi/linux/000077500000000000000000000000001314037446600275315ustar00rootroot00000000000000virtio_config.h000066400000000000000000000053301314037446600324650ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/uapi/linux#ifndef _UAPI_LINUX_VIRTIO_CONFIG_H #define _UAPI_LINUX_VIRTIO_CONFIG_H /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Virtio devices use a standardized configuration space to define their * features and pass configuration information, but each implementation can * store and access that space differently. */ #include /* Status byte for guest to report progress, and synchronize features. */ /* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */ #define VIRTIO_CONFIG_S_ACKNOWLEDGE 1 /* We have found a driver for the device. */ #define VIRTIO_CONFIG_S_DRIVER 2 /* Driver has used its parts of the config, and is happy */ #define VIRTIO_CONFIG_S_DRIVER_OK 4 /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 /* Some virtio feature bits (currently bits 28 through 31) are reserved for the * transport being used (eg. virtio_ring), the rest are per-device feature * bits. */ #define VIRTIO_TRANSPORT_F_START 28 #define VIRTIO_TRANSPORT_F_END 32 /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? 
*/ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/uapi/linux/virtio_ring.h000066400000000000000000000135341314037446600322430ustar00rootroot00000000000000#ifndef _UAPI_LINUX_VIRTIO_RING_H #define _UAPI_LINUX_VIRTIO_RING_H /* An interface for efficient virtio implementation, currently for use by KVM * and lguest, but hopefully others soon. Do NOT change this since it will * break existing servers and clients. * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright Rusty Russell IBM Corporation 2007. */ #include /* This marks a buffer as continuing via the next field. */ #define VRING_DESC_F_NEXT 1 /* This marks a buffer as write-only (otherwise read-only). */ #define VRING_DESC_F_WRITE 2 /* This means the buffer contains a list of buffer descriptors. */ #define VRING_DESC_F_INDIRECT 4 /* The Host uses this in used->flags to advise the Guest: don't kick me when * you add a buffer. It's unreliable, so it's simply an optimization. Guest * will still kick if it's out of buffers. */ #define VRING_USED_F_NO_NOTIFY 1 /* The Guest uses this in avail->flags to advise the Host: don't interrupt me * when you consume a buffer. It's unreliable, so it's simply an * optimization. */ #define VRING_AVAIL_F_NO_INTERRUPT 1 /* We support indirect buffer descriptors */ #define VIRTIO_RING_F_INDIRECT_DESC 28 /* The Guest publishes the used index for which it expects an interrupt * at the end of the avail ring. Host should ignore the avail->flags field. */ /* The Host publishes the avail index for which it expects a kick * at the end of the used ring. Guest should ignore the used->flags field. */ #define VIRTIO_RING_F_EVENT_IDX 29 /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ __u64 addr; /* Length. */ __u32 len; /* The flags as indicated above. 
*/ __u16 flags; /* We chain unused descriptors via this, too */ __u16 next; }; struct vring_avail { __u16 flags; __u16 idx; __u16 ring[]; }; /* u32 is used here for ids for padding reasons. */ struct vring_used_elem { /* Index of start of used descriptor chain. */ __u32 id; /* Total length of the descriptor chain which was used (written to) */ __u32 len; }; struct vring_used { __u16 flags; __u16 idx; struct vring_used_elem ring[]; }; struct vring { unsigned int num; struct vring_desc *desc; struct vring_avail *avail; struct vring_used *used; }; /* The standard layout for the ring is a continuous chunk of memory which looks * like this. We assume num is a power of 2. * * struct vring * { * // The actual descriptors (16 bytes each) * struct vring_desc desc[num]; * * // A ring of available descriptor heads with free-running index. * __u16 avail_flags; * __u16 avail_idx; * __u16 available[num]; * __u16 used_event_idx; * * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. * __u16 used_flags; * __u16 used_idx; * struct vring_used_elem used[num]; * __u16 avail_event_idx; * }; */ /* We publish the used event index at the end of the available ring, and vice * versa. They are at the end for backwards compatibility. */ #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) #define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) static inline void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) { vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16) + align-1) & ~(align - 1)); } static inline unsigned vring_size(unsigned int num, unsigned long align) { return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + align - 1) & ~(align - 1)) + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; } /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ /* Assuming a given event_idx value from the other size, if * we have just incremented index from old to new_idx, * should we trigger an event? */ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) { /* Note: Xen has similar logic for notification hold-off * in include/xen/interface/io/ring.h with req_event and req_prod * corresponding to event_idx + 1 and new_idx respectively. * Note also that req_event and req_prod in Xen start at 1, * event indexes in virtio start at 0. */ return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); } #endif /* _UAPI_LINUX_VIRTIO_RING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/000077500000000000000000000000001314037446600262265ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/balloon.h000066400000000000000000000052351314037446600300320ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_H__ #define __XEN_BALLOON_H__ #include #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); /* Free an empty page range (not allocated through alloc_empty_pages_and_pagevec), adding to the balloon. */ void free_empty_pages(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #endif #endif /* __XEN_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/blkif.h000066400000000000000000000112031314037446600274630ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_x86_32_request blkif_x86_32_request_t; typedef struct blkif_x86_32_response blkif_x86_32_response_t; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? 
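 *
 * (Usage sketch, not part of this header.) A backend typically records the
 * frontend's negotiated protocol in an enum blkif_protocol and copies each
 * request into the native layout before processing it. The names blk_rings,
 * proto and rc below are placeholders; RING_GET_REQUEST() comes from
 * xen/interface/io/ring.h.
 *
 *   blkif_request_t req;
 *   switch (proto) {
 *   case BLKIF_PROTOCOL_NATIVE:
 *           memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
 *           break;
 *   case BLKIF_PROTOCOL_X86_32:
 *           blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *           break;
 *   case BLKIF_PROTOCOL_X86_64:
 *           blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *           break;
 *   }
 *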
*/ }; typedef struct blkif_x86_64_request blkif_x86_64_request_t; typedef struct blkif_x86_64_response blkif_x86_64_response_t; DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src) { int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; dst->operation = src->operation; dst->nr_segments = src->nr_segments; dst->handle = src->handle; dst->id = src->id; dst->sector_number = src->sector_number; barrier(); if (n > dst->nr_segments) n = dst->nr_segments; for (i = 0; i < n; i++) dst->seg[i] = src->seg[i]; } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/compat_ioctl.h000066400000000000000000000030411314037446600310520ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 
2007 * * Authors: Jimi Xenidis * Hollis Blanchard */ #ifndef __LINUX_XEN_COMPAT_H__ #define __LINUX_XEN_COMPAT_H__ #include extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg); struct privcmd_mmap_32 { int num; domid_t dom; compat_uptr_t entry; }; struct privcmd_mmapbatch_32 { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ compat_uptr_t arr; /* array of mfns - top nibble set on err */ }; #define IOCTL_PRIVCMD_MMAP_32 \ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32)) #define IOCTL_PRIVCMD_MMAPBATCH_32 \ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32)) #endif /* __LINUX_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/cpu_hotplug.h000066400000000000000000000014751314037446600307370ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_t cpu_initialized_map; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); void cpu_bringup(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { printk(KERN_WARNING "Can't suspend SMP guests " "without CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/driver_util.h000066400000000000000000000003161314037446600307270ustar00rootroot00000000000000 #ifndef __ASM_XEN_DRIVER_UTIL_H__ #define __ASM_XEN_DRIVER_UTIL_H__ #include #include extern struct class *get_xen_class(void); #endif /* __ASM_XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/events.h000066400000000000000000000032211314037446600277010ustar00rootroot00000000000000#ifndef _XEN_EVENTS_H #define _XEN_EVENTS_H #include #include #include #include int bind_evtchn_to_irq(unsigned int evtchn); int bind_evtchn_to_irqhandler(unsigned int evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (even for bindings * made with bind_evtchn_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); int resend_irq_on_evtchn(unsigned int irq); void rebind_evtchn_irq(int evtchn, int irq); static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } extern void notify_remote_via_irq(int irq); extern void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); /* Poll waiting for an irq to become pending. 
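 *
 * Brief usage sketch (not part of this header); my_evtchn, my_handler and
 * my_dev are placeholders. An event channel is bound to an IRQ-like handler
 * with the interface declared above, notified, and later torn down:
 *
 *   static irqreturn_t my_handler(int irq, void *dev_id)
 *   {
 *           return IRQ_HANDLED;
 *   }
 *
 *   int irq = bind_evtchn_to_irqhandler(my_evtchn, my_handler, 0,
 *                                       "my-device", my_dev);
 *   if (irq >= 0) {
 *           notify_remote_via_irq(irq);           // kick the other end
 *           unbind_from_irqhandler(irq, my_dev);  // also closes the channel
 *   }
 *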
In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); #endif /* _XEN_EVENTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/evtchn.h000066400000000000000000000161161314037446600276730ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. 
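 *
 * For example (sketch only; otherend_id, my_handler and my_dev are
 * placeholders), a backend waiting for a frontend in domain otherend_id can
 * create an unbound listening port and get handler-based delivery:
 *
 *   int irq = bind_listening_port_to_irqhandler(otherend_id, my_handler,
 *                                               0, "my-backend", my_dev);
 *   if (irq < 0)
 *           return irq;                          // negative errno on failure
 *   // ... exchange notifications with the frontend ...
 *   unbind_from_irqhandler(irq, my_dev);         // closes the event channel
 *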
*/ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) int bind_virq_to_irqaction( unsigned int virq, unsigned int cpu, struct irqaction *action); #else #define bind_virq_to_irqaction(virq, cpu, action) \ bind_virq_to_irqhandler(virq, cpu, (action)->handler, \ (action)->flags | IRQF_NOBALANCING, \ (action)->name, action) #endif #if defined(CONFIG_SMP) && !defined(MODULE) #ifndef CONFIG_X86 int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #else int bind_ipi_to_irqaction( unsigned int ipi, unsigned int cpu, struct irqaction *action); #endif #endif /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) /* Specialized unbind function for per-CPU IRQs. */ void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, struct irqaction *); #else #define unbind_from_per_cpu_irq(irq, cpu, action) \ unbind_from_irqhandler(irq, action) #endif #ifndef CONFIG_XEN void irq_resume(void); #endif /* Entry point for notifications into Linux subsystems. */ asmlinkage void evtchn_do_upcall(struct pt_regs *regs); /* Entry point for notifications into the userland character device. */ void evtchn_device_upcall(int port); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, int xen_pirq); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); #ifdef CONFIG_SMP void rebind_evtchn_to_cpu(int port, unsigned int cpu); #else #define rebind_evtchn_to_cpu(port, cpu) ((void)0) #endif static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_clear_bit(port, s->evtchn_pending); } static inline void set_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; synch_set_bit(port, s->evtchn_pending); } static inline int test_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; return synch_test_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } static inline void multi_notify_remote_via_evtchn(multicall_entry_t *mcl, int port) { struct evtchn_send *send = (void *)(mcl->args + 2); BUILD_BUG_ON(sizeof(*send) > sizeof(mcl->args) - 2 * sizeof(*mcl->args)); send->port = port; mcl->op = __HYPERVISOR_event_channel_op; mcl->args[0] = EVTCHNOP_send; mcl->args[1] = (unsigned long)send; } /* Clear an irq's pending state, in preparation for polling on it. */ void xen_clear_irq_pending(int irq); /* Set an irq's pending state, to avoid blocking on it. */ void xen_set_irq_pending(int irq); /* Test an irq's pending state. */ int xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int multi_notify_remote_via_irq(multicall_entry_t *, int irq); int irq_to_evtchn_port(int irq); #define PIRQ_SET_MAPPING 0x0 #define PIRQ_CLEAR_MAPPING 0x1 #define PIRQ_GET_MAPPING 0x3 int pirq_mapstatus(int pirq, int action); int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action)); int clear_pirq_hw_action(int pirq); #define PIRQ_STARTUP 1 #define PIRQ_SHUTDOWN 2 #define PIRQ_ENABLE 3 #define PIRQ_DISABLE 4 #define PIRQ_END 5 #define PIRQ_ACK 6 #if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); #endif #endif /* __ASM_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/features.h000066400000000000000000000007561314037446600302250ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. 
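 *
 * Typical use (sketch only): drivers test individual feature flags reported
 * by the hypervisor before choosing a code path, e.g.
 *
 *   if (xen_feature(XENFEAT_auto_translated_physmap)) {
 *           // auto-translated physmap: pseudo-physical frames can be used
 *           // directly
 *   } else {
 *           // classic PV: translate pfn to mfn before handing it to Xen
 *   }
 *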
* * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_FEATURES_H__ #define __XEN_FEATURES_H__ #include #include void xen_setup_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; static inline int xen_feature(int flag) { return xen_features[flag]; } #endif /* __XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/firmware.h000066400000000000000000000003011314037446600302050ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) void copy_edd(void); #endif void copy_edid(void); #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/gnttab.h000066400000000000000000000124361314037446600276640ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
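 *
 * Rough usage sketch (not part of this header): a frontend sharing one page
 * with its backend. otherend_id is a placeholder, and virt_to_mfn() is
 * assumed to be available in this (classic Xen) tree.
 *
 *   void *shared_page = (void *)__get_free_page(GFP_KERNEL);
 *   int ref = gnttab_grant_foreign_access(otherend_id,
 *                                         virt_to_mfn(shared_page), 0);
 *   if (ref < 0)
 *           free_page((unsigned long)shared_page);
 *   // ... advertise 'ref' to the backend via xenstore, do I/O ...
 *   gnttab_end_foreign_access(ref, (unsigned long)shared_page);
 *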
*/ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); void __gnttab_dma_map_page(struct page *page); static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); #ifndef CONFIG_XEN int gnttab_resume(void); #endif void *arch_gnttab_alloc_shared(unsigned long *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/grant_table.h000066400000000000000000000104501314037446600306610ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. 
* (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_suspend(void); int gnttab_resume(void); int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
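 *
 * Sketch of the batched-reference pattern (not part of this header): a
 * frontend usually reserves a pool of references up front and claims them
 * one at a time as buffers are queued. otherend_id and frame are
 * placeholders.
 *
 *   grant_ref_t gref_head;
 *   if (gnttab_alloc_grant_references(64, &gref_head) < 0)
 *           return -ENOSPC;
 *   int ref = gnttab_claim_grant_reference(&gref_head);  // < 0 if exhausted
 *   gnttab_grant_foreign_access_ref(ref, otherend_id, frame, 0);
 *   // ... later, return the reference to the pool and free the pool:
 *   gnttab_release_grant_reference(&gref_head, ref);
 *   gnttab_free_grant_references(gref_head);
 *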
*/ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared); void arch_gnttab_unmap_shared(struct grant_entry *shared, unsigned long nr_gframes); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/hvc-console.h000066400000000000000000000007011314037446600306150ustar00rootroot00000000000000#ifndef XEN_HVC_CONSOLE_H #define XEN_HVC_CONSOLE_H extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN void xen_console_resume(void); void xen_raw_console_write(const char *str); void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } static inline void xen_raw_printk(const char *fmt, ...) { } #endif #endif /* XEN_HVC_CONSOLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/hvm.h000066400000000000000000000007101314037446600271670ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include static inline unsigned long hvm_get_parameter(int idx) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return 0; } return xhv.value; } #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/hypercall.h000066400000000000000000000013751314037446600303700ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? 
*rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/hypervisor_sysfs.h000066400000000000000000000015051314037446600320410ustar00rootroot00000000000000/* * copyright (c) 2006 IBM Corporation * Authored by: Mike D. Day * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef _HYP_SYSFS_H_ #define _HYP_SYSFS_H_ #include #include #define HYPERVISOR_ATTR_RO(_name) \ static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) #define HYPERVISOR_ATTR_RW(_name) \ static struct hyp_sysfs_attr _name##_attr = \ __ATTR(_name, 0644, _name##_show, _name##_store) struct hyp_sysfs_attr { struct attribute attr; ssize_t (*show)(struct hyp_sysfs_attr *, char *); ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t); void *hyp_attr_data; }; #endif /* _HYP_SYSFS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/page.h000066400000000000000000000000321314037446600273060ustar00rootroot00000000000000#include UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/pcifront.h000066400000000000000000000026601314037446600302270ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #include #ifdef __KERNEL__ #ifndef __ia64__ #include struct pcifront_device; struct pci_bus; #define pcifront_sd pci_sysdata static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { sd->domain = domain; sd->pdev = pdev; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { } #else /* __ia64__ */ #include #include #define pcifront_sd pci_controller extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return (struct pcifront_device *)sd->platform_data; } static inline void pcifront_setup_root_resources(struct pci_bus *bus, struct pcifront_sd *sd) { xen_pcibios_setup_root_windows(bus, sd); } #endif /* __ia64__ */ extern struct rw_semaphore pci_bus_sem; #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/000077500000000000000000000000001314037446600275045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/blkif.h000066400000000000000000000305011314037446600307430ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include #include /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. 
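 *
 * (Illustrative sketch, not part of this interface.) A frontend that has
 * seen "feature-discard" might queue a discard roughly like this; ring,
 * prod, id, start and count are placeholders, and RING_GET_REQUEST() comes
 * from io/ring.h.
 *
 *   struct blkif_request *req = RING_GET_REQUEST(&ring, prod);
 *   req->operation               = BLKIF_OP_DISCARD;
 *   req->u.discard.flag          = 0;          // or BLKIF_DISCARD_SECURE
 *   req->u.discard.id            = id;         // echoed back in the response
 *   req->u.discard.sector_number = start;
 *   req->u.discard.nr_sectors    = count;
 *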
Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. 
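 *
 * A hedged sketch of the arithmetic described above (placeholders only):
 * with 512 segments per indirect page, a request carrying nsegs segments
 * needs ceil(nsegs / 512) indirect pages, each filled with
 * blkif_request_segment_aligned entries and granted to the backend.
 *
 *   unsigned int i, nr_pages = (nsegs + 511) / 512;
 *   req->operation                = BLKIF_OP_INDIRECT;
 *   req->u.indirect.indirect_op   = BLKIF_OP_READ;   // the real data op
 *   req->u.indirect.nr_segments   = nsegs;
 *   req->u.indirect.id            = id;
 *   req->u.indirect.sector_number = start;
 *   req->u.indirect.handle        = handle;
 *   for (i = 0; i < nr_pages; i++)
 *           req->u.indirect.indirect_grefs[i] = indirect_page_gref[i];
 *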
Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). 
*/ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 /* Xen-defined major numbers for virtual disks, they look strangely * familiar */ #define XEN_IDE0_MAJOR 3 #define XEN_IDE1_MAJOR 22 #define XEN_SCSI_DISK0_MAJOR 8 #define XEN_SCSI_DISK1_MAJOR 65 #define XEN_SCSI_DISK2_MAJOR 66 #define XEN_SCSI_DISK3_MAJOR 67 #define XEN_SCSI_DISK4_MAJOR 68 #define XEN_SCSI_DISK5_MAJOR 69 #define XEN_SCSI_DISK6_MAJOR 70 #define XEN_SCSI_DISK7_MAJOR 71 #define XEN_SCSI_DISK8_MAJOR 128 #define XEN_SCSI_DISK9_MAJOR 129 #define XEN_SCSI_DISK10_MAJOR 130 #define XEN_SCSI_DISK11_MAJOR 131 #define XEN_SCSI_DISK12_MAJOR 132 #define XEN_SCSI_DISK13_MAJOR 133 #define XEN_SCSI_DISK14_MAJOR 134 #define XEN_SCSI_DISK15_MAJOR 135 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/evtchn.h000066400000000000000000000056351314037446600311550ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. 
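 *
 * For instance (userspace sketch; not part of this interface), a helper
 * process can allocate an unbound port for domain 'domid' as follows. The
 * device node name comes from the header comment above; error handling is
 * omitted.
 *
 *   struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = domid };
 *   int fd = open("/dev/xen/evtchn", O_RDWR);
 *   int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *   // port (if >= 0) can now be passed to the remote domain, e.g. via
 *   // xenstore; reading from fd later reports ports that have fired.
 *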
*/ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/gntdev.h000066400000000000000000000107401314037446600311460ustar00rootroot00000000000000/****************************************************************************** * gntdev.h * * Interface to /dev/xen/gntdev. * * Copyright (c) 2007, D G Murray * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ uint32_t domid; /* The grant reference of the grant to be mapped. */ uint32_t ref; }; /* * Inserts the grant references into the mapping table of an instance * of gntdev. N.B. This does not perform the mapping, which is deferred * until mmap() is called with @index as the offset. */ #define IOCTL_GNTDEV_MAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ uint32_t count; uint32_t pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; }; /* * Removes the grant references from the mapping table of an instance of * of gntdev. N.B. munmap() must be called on the relevant virtual address(es) * before this ioctl is called, or an error will result. 
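 *
 * Userspace sketch (not part of this interface; error handling omitted, and
 * the exact mmap() offset semantics are assumed here): map one grant from
 * domain 'domid' with reference 'gref' into the process address space.
 *
 *   struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
 *   map.refs[0].domid = domid;
 *   map.refs[0].ref   = gref;
 *   ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);         // fills map.index
 *   void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED, fd, map.index);
 *   // ... use p ...; munmap(p, 4096) must precede UNMAP_GRANT_REF below.
 *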
*/ #define IOCTL_GNTDEV_UNMAP_GRANT_REF \ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ uint64_t index; /* The number of pages to be unmapped. */ uint32_t count; uint32_t pad; }; /* * Returns the offset in the driver's address space that corresponds * to @vaddr. This can be used to perform a munmap(), followed by an * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by * the caller. The number of pages that were allocated at the same time as * @vaddr is returned in @count. * * N.B. Where more than one page has been mapped into a contiguous range, the * supplied @vaddr must correspond to the start of the range; otherwise * an error will result. It is only possible to munmap() the entire * contiguously-allocated range at once, and not any subrange thereof. */ #define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ uint64_t vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ uint64_t offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ uint32_t count; uint32_t pad; }; /* * Sets the maximum number of grants that may mapped at once by this gntdev * instance. * * N.B. This must be called before any other ioctl is performed on the device. */ #define IOCTL_GNTDEV_SET_MAX_GRANTS \ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ uint32_t count; }; #endif /* __LINUX_PUBLIC_GNTDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/privcmd.h000066400000000000000000000052141314037446600313230ustar00rootroot00000000000000/****************************************************************************** * privcmd.h * * Interface to /proc/xen/privcmd. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
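 *
 * (Sketch only, not part of this interface.) The toolstack issues
 * hypercalls through this device roughly as follows; __HYPERVISOR_xen_version
 * and XENVER_version come from the Xen public headers and are used here
 * purely as an example.
 *
 *   privcmd_hypercall_t call = {
 *           .op  = __HYPERVISOR_xen_version,
 *           .arg = { XENVER_version, 0, 0, 0, 0 },
 *   };
 *   int fd = open("/proc/xen/privcmd", O_RDWR);
 *   long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *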
*/ #ifndef __LINUX_PUBLIC_PRIVCMD_H__ #define __LINUX_PUBLIC_PRIVCMD_H__ #include #ifndef __user #define __user #endif typedef struct privcmd_hypercall { __u64 op; __u64 arg[5]; } privcmd_hypercall_t; typedef struct privcmd_mmap_entry { __u64 va; __u64 mfn; __u64 npages; } privcmd_mmap_entry_t; typedef struct privcmd_mmap { int num; domid_t dom; /* target domain */ privcmd_mmap_entry_t __user *entry; } privcmd_mmap_t; typedef struct privcmd_mmapbatch { int num; /* number of pages to populate */ domid_t dom; /* target domain */ __u64 addr; /* virtual address */ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */ } privcmd_mmapbatch_t; /* * @cmd: IOCTL_PRIVCMD_HYPERCALL * @arg: &privcmd_hypercall_t * Return: Value returned from execution of the specified hypercall. */ #define IOCTL_PRIVCMD_HYPERCALL \ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t)) #define IOCTL_PRIVCMD_MMAP \ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t)) #define IOCTL_PRIVCMD_MMAPBATCH \ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t)) #endif /* __LINUX_PUBLIC_PRIVCMD_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/public/vscsiif.h000066400000000000000000000135041314037446600313260ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. */ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include #include /* commands between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_ACT_SCSI_SG_PRESET 4 /* Preset SG elements */ /* indirect: indirect requestڴΣֵҲbackend */ #define MAX_VSCSI_INDIRECT_SEGMENTS 128U /* indirect: ҳܱ512 */ #define SEGS_PER_VSCSI_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct scsiif_request_segment_aligned)) /* indirect: ĿǰSCSIIF_MAX_SEGMENTS_PER_REQUEST 8 ʹindirect * ÿСʹSCSIIF_MAX_SEGMENTS_PER_REQUEST(8)ҳ * Ŀǰֻʹõ1ҳ(256 * 4K)Ѿ㹻 */ #define MAX_VSCSI_INDIRECT_PAGES \ ((MAX_VSCSI_INDIRECT_SEGMENTS + SEGS_PER_VSCSI_INDIRECT_FRAME - 1)/SEGS_PER_VSCSI_INDIRECT_FRAME) /* indirect: 㵱ǰindirect pageĿǰֻʹ 1 */ #define VSCSI_INDIRECT_PAGES(_segs) \ ((_segs + SEGS_PER_VSCSI_INDIRECT_FRAME - 1)/SEGS_PER_VSCSI_INDIRECT_FRAME) /* * Maximum scatter/gather segments per request. 
 * * Considering balance between allocating at least 16 "vscsiif_request" * structures on one page (4096 bytes) and the number of scatter/gather * elements needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* indirect: maximum number of indirect pages a single indirect request may use */ #define SCSIIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 /* * based on Linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 /* indirect: segment descriptor stored inside an indirect page; packed so that a whole number of descriptors fits into one page */ struct scsiif_request_segment_aligned { grant_ref_t gref; uint16_t offset; uint16_t length; } __attribute__((__packed__)); struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; typedef struct scsiif_request_segment vscsiif_segment_t; /* direct (non-indirect) request payload */ struct vscsiif_request_rw { uint8_t nr_segments; /* Number of pieces of scatter-gather */ vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; } __attribute__((__packed__)); /* indirect request payload */ struct vscsiif_request_indirect { uint8_t indirect_op; grant_ref_t indirect_grefs[SCSIIF_MAX_INDIRECT_PAGES_PER_REQUEST]; uint16_t nr_segments; uint16_t _pad1; uint32_t reserved[3]; } __attribute__((__packed__)); /* request structure placed on the shared ring */ struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ union { struct vscsiif_request_rw rw; struct vscsiif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct vscsiif_request vscsiif_request_t; #define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) \ / sizeof(vscsiif_segment_t)) struct vscsiif_sg_list { /* First two fields must match struct vscsiif_request!
*/ uint16_t rqid; /* private guest value, must match main req */ uint8_t act; /* VSCSIIF_ACT_SCSI_SG_PRESET */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE]; }; typedef struct vscsiif_sg_list vscsiif_sg_list_t; struct vscsiif_response { uint16_t rqid; uint8_t act; /* valid only when backend supports SG_PRESET */ uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/sysctl.h000066400000000000000000000002551314037446600277220ustar00rootroot00000000000000#ifndef _XEN_SYSCTL_H #define _XEN_SYSCTL_H /* CTL_XEN names: */ enum { CTL_XEN_INDEPENDENT_WALLCLOCK=1, CTL_XEN_PERMITTED_CLOCK_JITTER=2, }; #endif /* _XEN_SYSCTL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xen-ops.h000066400000000000000000000005351314037446600277730ustar00rootroot00000000000000#ifndef INCLUDE_XEN_OPS_H #define INCLUDE_XEN_OPS_H #include DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_pre_suspend(void); void xen_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); #endif /* INCLUDE_XEN_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xen_proc.h000066400000000000000000000004021314037446600302100ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include extern struct proc_dir_entry *create_xen_proc_entry( const char *name, mode_t mode); extern void remove_xen_proc_entry( const char *name); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xenbus.h000066400000000000000000000273521314037446600277140ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* See XBWF_ definitions below. */ unsigned long flags; #endif }; #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 #endif /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { const char *name; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); int (*suspend_cancel)(struct xenbus_device *dev); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_frontend(struct xenbus_driver *drv) { return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); } int __must_check __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_backend(struct xenbus_driver *drv) { return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); } void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. 
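 * Pass XBT_NIL whenever a single xenstore access should run outside of an
 * explicit transaction, e.g. (illustrative):
 *   xenbus_read(XBT_NIL, dev->nodename, "state", NULL);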
*/ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. 
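 *
 * Illustrative call, in the style frontends use to watch the backend's state
 * node (backend_changed being a driver-supplied callback):
 *   xenbus_watch_path2(dev, dev->otherend, "state",
 *                      &dev->otherend_watch, backend_changed);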
*/ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #else int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) __attribute__ ((format (printf, 4, 5))); #endif /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. 
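 *
 * Illustrative use from a frontend setup path:
 *   xenbus_dev_fatal(dev, err, "allocating event channel");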
*/ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); int xenbus_dev_init(void); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xencomm.h000066400000000000000000000045171314037446600300540ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. */ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #define xencomm_map_no_alloc(ptr, bytes) \ ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xencons.h000066400000000000000000000007141314037446600300560ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); void xencons_force_flush(void); void xencons_resume(void); /* Interrupt work hooks. Receive data, or kick data out. 
*/ void xencons_rx(char *buf, unsigned len); void xencons_tx(void); int xencons_ring_init(void); int xencons_ring_send(const char *data, unsigned len); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/include/xen/xenoprof.h000066400000000000000000000025651314037446600302470ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/version.ini000066400000000000000000000000631314037446600261760ustar00rootroot00000000000000KernModeVersion=2.2.0.209 UserModeVersion=2.2.0.202UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/000077500000000000000000000000001314037446600260155ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/000077500000000000000000000000001314037446600265455ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/config.c000066400000000000000000000004541314037446600301610ustar00rootroot00000000000000/* Configuration space parsing helpers for virtio. * * The configuration is [type][len][... len bytes ...] fields. * * Copyright 2007 Rusty Russell, IBM Corporation. * GPL v2 or later. */ #include #include #include #include UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/virtio.c000066400000000000000000000156031314037446600302320ustar00rootroot00000000000000#include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) /* Unique numbering for virtio devices. 
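 * (ida_simple_get()/ida_simple_remove() are not available on the older kernels
 * this driver still targets, hence the plain dev_index counter used as a
 * fallback further below)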
*/ static DEFINE_IDA(virtio_index_ida); #endif static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) len += sprintf(buf+len, "%c", test_bit(i, dev->features) ? '1' : '0'); len += sprintf(buf+len, "\n"); return len; } static struct device_attribute virtio_dev_attrs[] = { __ATTR_RO(device), __ATTR_RO(vendor), __ATTR_RO(status), __ATTR_RO(modalias), __ATTR_RO(features), __ATTR_NULL }; static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) { if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) return 0; return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; } /* This looks through all the IDs a driver claims to support. If any of them * match, we return 1 and the kernel will call virtio_dev_probe(). */ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; return 0; } static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); } static void add_status(struct virtio_device *dev, unsigned status) { dev->config->set_status(dev, dev->config->get_status(dev) | status); } void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); /* Features supported by both device and driver into dev->features. */ memset(dev->features, 0, sizeof(dev->features)); for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; BUG_ON(f >= 32); if (device_features & (1 << f)) set_bit(f, dev->features); } /* Transport features always preserved to pass to finalize_features. 
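 * (feature bits in the VIRTIO_TRANSPORT_F_START..VIRTIO_TRANSPORT_F_END range
 * describe the transport itself, e.g. the ring's indirect-descriptor support,
 * rather than the device class, so they are carried over even when the driver
 * does not list them in its feature table)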
*/ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) if (device_features & (1 << i)) set_bit(i, dev->features); dev->config->finalize_features(dev); err = drv->probe(dev); if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); else { add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); if (drv->scan) drv->scan(dev); } return err; } static int virtio_dev_remove(struct device *_d) { struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); /* Driver should have reset device. */ WARN_ON_ONCE(dev->config->get_status(dev)); /* Acknowledge the device's existence again. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); return 0; } static struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, .dev_attrs = virtio_dev_attrs, .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, }; int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. */ BUG_ON(driver->feature_table_size && !driver->feature_table); driver->driver.bus = &virtio_bus; return driver_register(&driver->driver); } EXPORT_SYMBOL_GPL(register_virtio_driver); void unregister_virtio_driver(struct virtio_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(unregister_virtio_driver); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,14) /* Unique numbering for virtio devices. */ static unsigned int dev_index; #endif int register_virtio_device(struct virtio_device *dev) { int err; dev->dev.bus = &virtio_bus; /* Assign a unique device index and hence name. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); if (err < 0) goto out; dev->index = err; #else dev->index = dev_index++; #endif dev_set_name(&dev->dev, "virtio%u", dev->index); /* We always start by resetting the device, in case a previous * driver messed it up. This also tests that code path a little. */ dev->config->reset(dev); /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); /* device_register() causes the bus infrastructure to look for a * matching driver. */ err = device_register(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) out: #endif if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } EXPORT_SYMBOL_GPL(register_virtio_device); void unregister_virtio_device(struct virtio_device *dev) { #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) int index = dev->index; /* save for after device release */ #endif device_unregister(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) ida_simple_remove(&virtio_index_ida, index); #endif } EXPORT_SYMBOL_GPL(unregister_virtio_device); static int virtio_init(void) { if (bus_register(&virtio_bus) != 0) panic("virtio bus registration failed"); return 0; } static void __exit virtio_exit(void) { bus_unregister(&virtio_bus); } core_initcall(virtio_init); module_exit(virtio_exit); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/virtio_net.c000066400000000000000000001447701314037446600311100ustar00rootroot00000000000000/* A network driver using virtio. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* device supports hardware address * change when it's running */ static unsigned int iff_live_addr_change = 1; static int napi_weight = 64; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. */ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 #endif #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; u64 tx_bytes; u64 tx_packets; u64 rx_bytes; u64 rx_packets; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[40]; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* Chain pages by the private ptr. */ struct page *pages; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of this receive queue: input.$index */ char name[40]; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Has control virtqueue */ bool has_cvq; /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; /* enable config space updates */ bool config_enable; /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Work struct for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* Does the affinity hint is set for virtqueues? */ bool affinity_hint_set; /* Per-cpu variable to show the mapping from CPU to virtqueue */ int __percpu *vq_index; /* CPU hot plug notifier */ struct notifier_block nb; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. 
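 * (struct virtio_net_hdr is 10 bytes - two u8 fields plus four u16 fields -
 * so the 6 bytes of padding below round the header area up to 16 bytes)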
*/ char padding[6]; }; /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. */ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); /* We were probably waiting for more output buffers. */ netif_wake_subqueue(vi->dev, vq2txq(vq)); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int size = min((unsigned)PAGE_SIZE - offset, *len); int i = skb_shinfo(skb)->nr_frags; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,13)) skb_fill_page_desc(skb, i, page, offset, size); #else __skb_fill_page_desc(skb, i, page, offset, size); skb_shinfo(skb)->nr_frags++; #endif skb->data_len += size; skb->len += size; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; #endif *len -= size; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct receive_queue *rq, struct page *page, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. 
*/ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); return skb; } static struct sk_buff *receive_small(void *buf, unsigned int len) { struct sk_buff * skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); return skb; } static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf) { struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, 0); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; } static struct sk_buff *receive_mergeable(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len) { struct skb_vnet_hdr *hdr = page_address(buf); int num_buf = hdr->mhdr.num_buffers; struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, len); int i; if (unlikely(!skb)) goto err_skb; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; goto err_frags; } page = virtqueue_get_buf(rq->vq, &len); if (!page) { pr_debug("%s: rx error: %d buffers %d missing\n", dev->name, hdr->mhdr.num_buffers, num_buf); dev->stats.rx_length_errors++; goto err_buf; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --rq->num; } return skb; err_skb: give_pages(rq, page); while (--num_buf) { err_frags: buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } page = buf; give_pages(rq, page); --rq->num; } err_buf: dev->stats.rx_dropped++; dev_kfree_skb(skb); return NULL; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(rq, buf); else dev_kfree_skb(buf); return; } if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, rq, buf, len); else if (vi->big_packets) skb = receive_big(dev, rq, buf); else skb = receive_small(buf, len); if (unlikely(!skb)) return; hdr = skb_vnet_hdr(skb); skb->truesize += skb->data_len; u64_stats_update_begin(&stats->rx_syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 
break; default: net_warn_ratelimited("%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { net_warn_ratelimited("%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; #if (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)) skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); #else skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); #endif if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) { struct page *page; int err; page = get_a_page(rq, gfp); if (!page) return -ENOMEM; sg_init_one(rq->sg, page_address(page), PAGE_SIZE); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(rq, gfp); else err = add_recvbuf_small(rq, gfp); oom = err == -ENOMEM; if (err) break; ++rq->num; } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; virtqueue_kick(rq->vq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; /* Schedule NAPI, Suppress further interrupts if successful. 
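 * (if napi_schedule_prep() fails, the poll routine is already scheduled, so
 * nothing more needs to be done here)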
*/ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&rq->napi); } } static void virtnet_napi_enable(struct receive_queue *rq) { napi_enable(&rq->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rq->vq); local_bh_disable(); __napi_schedule(&rq->napi); local_bh_enable(); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(rq, GFP_KERNEL); virtnet_napi_enable(rq); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; void *buf; unsigned int r, len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(rq, buf, len); --rq->num; received++; } if (rq->num < rq->max / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); napi_complete(napi); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); __napi_schedule(napi); goto again; } } return received; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } return 0; } static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); u64_stats_update_begin(&stats->tx_syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->tx_syncp); dev_kfree_skb_any(skb); } } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; unsigned hdr_len; bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (vi->mergeable_rx_bufs) hdr_len = sizeof hdr->mhdr; else hdr_len = sizeof hdr->hdr; can_push = vi->any_header_sg && !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; /* Even if we can, don't push here yet as this would skew * csum_start offset below. 
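 * (skb_checksum_start_offset() below is measured from skb->data, so the header
 * is pushed in front of the data only after the checksum fields are filled in)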
*/ if (can_push) hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); else hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } return NETDEV_TX_OK; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out, struct scatterlist *in) { struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; /* Add header */ sg_init_one(&hdr, &ctrl, sizeof(ctrl)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; if (in) sgs[out_num + in_num++] = in; /* Add return status. 
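 * (the device writes VIRTIO_NET_OK or VIRTIO_NET_ERR into this buffer; the
 * loop further down busy-waits until it appears)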
*/ sg_init_one(&stat, &status, sizeof(status)); sgs[out_num + in_num++] = &stat; BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0); virtqueue_kick(vi->cvq); /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && !virtqueue_is_broken(vi->cvq)) cpu_relax(); return status == VIRTIO_NET_OK; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ static int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!iff_live_addr_change && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ static void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } #endif static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr = p; struct scatterlist sg; ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), addr->sa_data, dev->addr_len); } eth_commit_mac_addr_change(dev, p); return 0; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) static struct net_device_stats *virtnet_stats(struct net_device *dev) { int cpu; unsigned int start; struct virtnet_info *vi = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; stats->rx_packets = 0; stats->tx_packets = 0; stats->rx_bytes = 0; stats->tx_bytes = 0; for_each_possible_cpu(cpu) { struct virtnet_stats *stats_tmp = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats_tmp->tx_syncp); tpackets = stats_tmp->tx_packets; tbytes = stats_tmp->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats_tmp->rx_syncp); rpackets = stats_tmp->rx_packets; rbytes = stats_tmp->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->rx_syncp, start)); stats->rx_packets += rpackets; stats->tx_packets += tpackets; stats->rx_bytes += rbytes; stats->tx_bytes += tbytes; } return stats; } #else static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; unsigned int start; for_each_possible_cpu(cpu) { struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 
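/* fold this CPU's counters into the device-wide totals */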
tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; return tot; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->curr_queue_pairs; i++) napi_schedule(&vi->rq[i].napi); } #endif static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; s.virtqueue_pairs = queue_pairs; sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); return 0; } #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) static int virtnet_set_tx_csum(struct net_device *dev, u32 data) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) return -ENOSYS; return ethtool_op_set_tx_hw_csum(dev, data); } #endif static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) struct dev_addr_list *addr; #endif struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, addr = addr->next) memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); #else i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); #endif sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) { int i; int cpu; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, -1); virtqueue_set_affinity(vi->sq[i].vq, -1); } vi->affinity_hint_set = false; } i = 0; for_each_online_cpu(cpu) { if (cpu == hcpu) { *per_cpu_ptr(vi->vq_index, cpu) = -1; } else { *per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs; } } } static void virtnet_set_affinity(struct virtnet_info *vi) { int i; int cpu; /* In multiqueue mode, when the number of cpu is equal to the number of * queue pairs, we let the queue pairs to be private to one cpu by * setting the affinity hint to eliminate the contention. 
*/ if (vi->curr_queue_pairs == 1 || vi->max_queue_pairs != num_online_cpus()) { virtnet_clean_affinity(vi, -1); return; } i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); *per_cpu_ptr(vi->vq_index, cpu) = i; i++; } vi->affinity_hint_set = true; } static int virtnet_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); switch(action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_DEAD: virtnet_set_affinity(vi); break; case CPU_DOWN_PREPARE: virtnet_clean_affinity(vi, (long)hcpu); break; default: break; } return NOTIFY_OK; } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)) /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. 
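 * In other words only the "combined" channel count (typically requested
 * via "ethtool -L <dev> combined N") is honoured; any non-zero
 * rx/tx/other request is rejected with -EINVAL below.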
*/ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs) return -EINVAL; get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); return err; } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } #endif static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, .set_ufo = ethtool_op_set_ufo, #else .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, #endif }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) .ndo_get_stats = virtnet_stats, #else .ndo_get_stats64 = virtnet_stats, #endif .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) /* from higher version kernel code. For redhat*/ static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) { struct in_ifaddr *ifa; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { arp_send(ARPOP_REQUEST, ETH_P_ARP, ifa->ifa_local, dev, ifa->ifa_local, NULL, dev->dev_addr, NULL); } } /** * netif_notify_peers - notify network peers about existence of @dev * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when * a device wants to inform the rest of the network about some sort of * reconfiguration such as a failover event or virtual machine * migration. * For redhat. 
*/ void netif_notify_peers(struct net_device *dev) { struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(dev); if(in_dev) inetdev_send_gratuitous_arp(dev, in_dev); rtnl_unlock(); } #endif static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; mutex_lock(&vi->config_lock); if (!vi->config_enable) goto done; if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) netif_notify_peers(vi->dev); #else netdev_notify_peers(vi->dev); #endif virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) goto done; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } done: mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) netif_napi_del(&vi->rq[i].napi); kfree(vi->rq); kfree(vi->sq); } static void free_receive_bufs(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); } } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) dev_kfree_skb(buf); } for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; } BUG_ON(vi->rq[i].num != 0); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi, -1); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. 
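 * For example, with a control queue present the expected order is
 * vq0 = rx0, vq1 = tx0, vq2 = rx1, vq3 = tx1, ... with the control vq
 * last (vqs[total_vqs - 1]); rxq2vq()/txq2vq(), defined earlier in this
 * file, encode that rx/tx interleaving.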
*/ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) vi->dev->features |= NETIF_F_HW_VLAN_FILTER; #else vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; #endif } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); return 0; err_find: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } return 0; err_rq: kfree(vi->sq); err_sq: return -ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; get_online_cpus(); virtnet_set_affinity(vi); put_online_cpus(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } static int virtnet_probe(struct virtio_device *vdev) { int i, err; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; /* Find if host supports multiqueue virtio_net device */ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, offsetof(struct virtio_net_config, max_virtqueue_pairs), &max_queue_pairs); /* We need at least 2 queue's */ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. 
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) dev->priv_flags |= IFF_UNICAST_FLT; #endif dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->features |= NETIF_F_TSO_ECN; dev->features |= NETIF_F_GSO_ROBUST; #else dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; #endif /* (!csum && gso) case will be fixed by register_netdev() */ } dev->vlan_features = dev->features; /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len) < 0) eth_hw_addr_random(dev); /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; vi->vq_index = alloc_percpu(int); if (vi->vq_index == NULL) goto free_stats; mutex_init(&vi->config_lock); vi->config_enable = true; INIT_WORK(&vi->config_work, virtnet_config_changed_work); /* If we can receive ANY GSO packets, we must allocate large ones. 
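 * That is, each receive buffer must be able to hold a complete GSO frame
 * (up to about 64K) rather than a single MTU-sized packet, which is what
 * vi->big_packets selects in the receive path.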
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) { vi->any_header_sg = true; if (vi->mergeable_rx_bufs) dev->needed_headroom = sizeof (struct virtio_net_hdr_mrg_rxbuf); else dev->needed_headroom = sizeof (struct virtio_net_hdr); } /* Use single tx/rx queue pair as default */ vi->curr_queue_pairs = 1; vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free_index; get_online_cpus(); err = virtnet_set_queues(vi, max_queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, max_queue_pairs); netif_set_real_num_rx_queues(dev, max_queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->rq[i].num == 0) { free_unused_bufs(vi); err = -ENOMEM; goto free_recv_bufs; } } vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_recv_bufs; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_recv_bufs: free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); virtnet_del_vqs(vi); free_index: free_percpu(vi->vq_index); free_stats: free_percpu(vi->stats); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); /* Free unused buffers in both send and recv, if any. */ free_unused_bufs(vi); free_receive_bufs(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device. 
*/ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); unregister_netdev(vi->dev); remove_vq_common(vi); flush_work(&vi->config_work); free_percpu(vi->vq_index); free_percpu(vi->stats); free_netdev(vi->dev); } #ifdef CONFIG_PM static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int i; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); netif_device_detach(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); } remove_vq_common(vi); flush_work(&vi->config_work); return 0; } static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err, i; err = init_vqs(vi); if (err) return err; if (netif_running(vi->dev)) { for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); } netif_device_attach(vi->dev); mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); err = register_hotcpu_notifier(&vi->nb); if (err) return err; return 0; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; module_virtio_driver(virtio_net_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/virtio_pci.c000066400000000000000000000564271314037446600310760ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Anthony Liguori "); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1"); /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; /* the IO mapping for the PCI config space */ void __iomem *ioaddr; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; /* MSI-X support */ int msix_enabled; int intx_enabled; struct msix_entry *msix_entries; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; /* Status saved during hibernate/restore */ u8 saved_status; /* Whether we have vector per vq */ bool per_vq_vectors; }; /* Constants for MSI-X */ /* Use first vector for configuration changes, second and the rest for * virtqueues Thus, we need at least 2 vectors for MSI. */ enum { VP_MSIX_CONFIG_VECTOR = 0, VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ int num; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; /* MSI-X vector (or none) */ unsigned msix_vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_pci_device, vdev); } /* virtio config->get_features() implementation */ static u32 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* When someone needs more than 32 feature bits, we'll need to * steal a bit to indicate that the rest are somewhere else. */ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); } /* virtio config->finalize_features() implementation */ static void vp_finalize_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* We only support 32 feature bits. */ BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); } /* virtio config->get() implementation */ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = ioread8(ioaddr + i); } /* the config->set() implementation. 
it's symmetric to the config->get() * implementation */ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; for (i = 0; i < len; i++) iowrite8(ptr[i], ioaddr + i); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } /* wait for pending irq handlers */ static void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) synchronize_irq(vp_dev->pci_dev->irq); for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(vp_dev->msix_entries[i].vector); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush out the status write, and flush in device writes, * including MSi-X interrupts, if any. */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); } /* the notify function used when creating a virt queue */ static void vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. */ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_driver *drv; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vp_dev->vdev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static void vp_free_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); for (i = 0; i < vp_dev->msix_vectors; i++) if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; vp_dev->msix_vectors = 0; } vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_entries); vp_dev->msix_entries = NULL; kfree(vp_dev->msix_affinity_masks); vp_dev->msix_affinity_masks = NULL; } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) goto error; vp_dev->msix_affinity_masks = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, GFP_KERNEL); if (!vp_dev->msix_affinity_masks) goto error; for (i = 0; i < nvectors; ++i) if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], GFP_KERNEL)) goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; /* pci_enable_msix returns positive if we can't get this many. 
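 * (A positive return is the number of vectors that could have been
 * allocated instead; we do not retry with fewer, we just turn the
 * shortfall into -ENOSPC.)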
*/ err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); if (err > 0) err = -ENOSPC; if (err) goto error; vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Verify we had enough resources to assign the vector */ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: vp_free_vectors(vdev); return err; } static int vp_request_intx(struct virtio_device *vdev) { int err; struct virtio_pci_device *vp_dev = to_vp_device(vdev); err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (!err) vp_dev->intx_enabled = 1; return err; } static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; u16 num; int err; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); /* Check if queue is either not available or already active. 
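 * (A QUEUE_NUM of zero means the host does not provide this virtqueue at
 * all; a non-zero QUEUE_PFN means it has already been activated. Either
 * way we bail out with -ENOENT.)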
*/ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); /* allocate and fill out our structure the represents an active * queue */ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); info->num = num; info->msix_vector = msix_vec; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); if (info->queue == NULL) { err = -ENOMEM; goto out_info; } /* activate the queue */ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, true, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; } vq->priv = info; info->vq = vq; if (msix_vec != VIRTIO_MSI_NO_VECTOR) { iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto out_assign; } } if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); } else { INIT_LIST_HEAD(&info->node); } return vq; out_assign: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); out_info: kfree(info); return ERR_PTR(err); } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long flags, size; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); } vring_del_virtqueue(vq); /* Select and deactivate the queue */ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vq->priv; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, vq); vp_del_vq(vq); } vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; int i, err, nvectors, allocated_vectors; if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) goto error_request; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) if (callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. 
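 * (With this scheme vector 0 (VP_MSIX_CONFIG_VECTOR) handles configuration
 * changes and vector 1 (VP_MSIX_VQ_VECTOR) is shared by all virtqueues.)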
*/ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; } vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } else if (!callbacks[i] || !vp_dev->msix_enabled) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vp_dev->per_vq_vectors) msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof *vp_dev->msix_names, "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(vp_dev->msix_entries[msix_vec].vector, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); if (err) { vp_del_vq(vqs[i]); goto error_find; } } return 0; error_find: vp_del_vqs(vdev); error_request: return err; } /* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { int err; /* Try MSI-X with one vector per queue. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false); if (!err) return 0; /* Finally fall back to regular interrupts. */ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false); } static const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return pci_name(vp_dev->pci_dev); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) /* Setup the affinity for a virtqueue: * - force the affinity for per vq vector * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = vq->priv; struct cpumask *mask; unsigned int irq; if (!vq->callback) return -EINVAL; if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = vp_dev->msix_entries[info->msix_vector].vector; if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } } return 0; } #endif static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, .find_vqs = vp_find_vqs, .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) .set_vq_affinity = vp_set_vq_affinity, #endif }; static void virtio_pci_release_dev(struct device *_d) { /* * No need for a release method as we allocate/free * all devices together with the pci devices. * Provide an empty one to avoid getting a warning from core. */ } #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) /** * pci_msi_off - disables any msi or msix capabilities * @dev: the PCI device to operate on * * If you want to use msi see pci_enable_msi and friends. * This is a lower level primitive that allows us to disable * msi operation at the device level. 
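 * Note: this is a local copy carried here for kernels older than 3.0 (see
 * the #if guard above); it simply clears the MSI and MSI-X enable bits in
 * the respective PCI capability structures, mirroring the mainline helper
 * of the same name.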
*/ void pci_msi_off(struct pci_dev *dev) { int pos; u16 control; pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); } pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); } } #endif /* the PCI probing function */ static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) return -ENODEV; if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", VIRTIO_PCI_ABI_VERSION, pci_dev->revision); return -ENODEV; } /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (vp_dev == NULL) return -ENOMEM; vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); /* Disable MSI/MSIX to bring device to a known good state. */ pci_msi_off(pci_dev); /* enable the device */ err = pci_enable_device(pci_dev); if (err) goto out; err = pci_request_regions(pci_dev, "virtio-pci"); if (err) goto out_enable_device; vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); if (vp_dev->ioaddr == NULL) { err = -ENOMEM; goto out_req_regions; } pci_set_drvdata(pci_dev, vp_dev); pci_set_master(pci_dev); /* we use the subsystem vendor/device id as the virtio vendor/device * id. 
this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by * the subsystem ids */ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) goto out_set_drvdata; return 0; out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); out_enable_device: pci_disable_device(pci_dev); out: kfree(vp_dev); return err; } static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); vp_del_vqs(&vp_dev->vdev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } #ifdef CONFIG_PM static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = 0; vp_dev->saved_status = vp_get_status(&vp_dev->vdev); if (drv && drv->freeze) ret = drv->freeze(&vp_dev->vdev); if (!ret) pci_disable_device(pci_dev); return ret; } static int virtio_pci_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = pci_enable_device(pci_dev); if (ret) return ret; pci_set_master(pci_dev); vp_finalize_features(&vp_dev->vdev); if (drv && drv->restore) ret = drv->restore(&vp_dev->vdev); /* Finally, tell the device we're all set */ if (!ret) vp_set_status(&vp_dev->vdev, vp_dev->saved_status); return ret; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) static const struct dev_pm_ops virtio_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) }; #else static const struct dev_pm_ops virtio_pci_pm_ops = { .suspend = virtio_pci_freeze, .resume = virtio_pci_restore, .freeze = virtio_pci_freeze, .thaw = virtio_pci_restore, .restore = virtio_pci_restore, .poweroff = virtio_pci_freeze, }; #endif #endif static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = virtio_pci_remove, #ifdef CONFIG_PM .driver.pm = &virtio_pci_pm_ops, #endif }; static int __init virtio_pci_init(void) { return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); } module_exit(virtio_pci_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/2.6.32/virtio_ring.c000066400000000000000000000570751314037446600312620ustar00rootroot00000000000000/* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #endif struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ struct vring vring; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. */ u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif /* Tokens for callbacks. */ void *data[]; }; #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, unsigned int *count) { return sg_next(sg); } static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, unsigned int *count) { if (--(*count) == 0) return NULL; return sg + 1; } /* Set up an indirect table of descriptors and add it to the queue. */ static inline int vring_add_indirect(struct vring_virtqueue *vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_sg, unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, gfp_t gfp) { struct vring_desc *desc; unsigned head; struct scatterlist *sg; int i, n; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; /* Transfer entries from the sg lists into the indirect page */ i = 0; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { desc[i].flags = VRING_DESC_F_NEXT; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } BUG_ON(i != total_sg); /* Last one doesn't continue. 
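 * (i.e. VRING_DESC_F_NEXT is cleared on the final descriptor so the
 * device stops walking the indirect chain there.)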
*/ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ vq->free_head = vq->vring.desc[head].next; return head; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; unsigned int i, n, avail, uninitialized_var(prev), total_sg; int head; START_USE(vq); BUG_ON(data == NULL); #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif total_sg = total_in + total_out; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 2 && vq->vq.num_free) { head = vring_add_indirect(vq, sgs, next, total_sg, total_out, total_in, out_sgs, in_sgs, gfp); if (likely(head >= 0)) goto add_head; } BUG_ON(total_sg > vq->vring.num); BUG_ON(total_sg == 0); if (vq->vq.num_free < total_sg) { pr_debug("Can't add buf len %i - avail = %i\n", total_sg, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ vq->vq.num_free -= total_sg; head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ vq->free_head = i; add_head: /* Set token. */ vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->vring.avail->idx++; vq->num_added++; /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; } /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. * @sg: the description of the buffer(s). * @out_num: the number of sg readable by other side * @in_num: the number of sg which are writable (after readable ones) * @data: the token identifying the buffer. 
* @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *data, gfp_t gfp) { struct scatterlist *sgs[2]; sgs[0] = sg; sgs[1] = sg + out; return virtqueue_add(_vq, sgs, sg_next_arr, out, in, out ? 1 : 0, in ? 1 : 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_buf); /** * virtqueue_add_sgs - expose buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_num: the number of scatterlists readable by other side * @in_num: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_out, total_in; /* Count them first. */ for (i = total_out = total_in = 0; i < out_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_out++; } for (; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_in++; } return virtqueue_add(_vq, sgs, sg_next_chained, total_out, total_in, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. 
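 * A minimal usage sketch (the lock name is illustrative, not part of this
 * driver):
 *
 *	spin_lock(&tx_lock);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&tx_lock);
 *	if (kick)
 *		virtqueue_notify(vq);
 *
 * so the potentially slow notification write to the host happens outside
 * the critical section.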
*/ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(vring_avail_event(&vq->vring), new, old); } else { needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); } END_USE(vq); return needs_kick; } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @vq: the struct virtqueue * * This does not need to be serialized. */ void virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Prod other side to tell it about changes. */ vq->notify(_vq); } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_buf calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ void virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i; /* Clear data ptr. */ vq->data[head] = NULL; /* Put back on free list: find end */ i = head; /* Free the indirect table */ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) kfree(phys_to_virt(vq->vring.desc[i].addr)); while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) { return vq->last_used_idx != vq->vring.used->idx; } /** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the driver wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_buf(). */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->data[i])) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. 
*/ ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq->weak_barriers); } #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; END_USE(vq); return last_used_idx; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); virtio_mb(vq->weak_barriers); return (u16)last_used_idx != vq->vring.used->idx; } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. 
* * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; virtio_mb(vq->weak_barriers); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_buf(). * This is not valid on an active queue; it is useful only for device * shutdown. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->data[i]) continue; /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); vq->vring.avail->idx--; END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) return IRQ_HANDLED; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct vring_virtqueue *vq; unsigned int i; /* We assume num is a power of 2. */ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); if (!vq) return NULL; vring_init(&vq->vring, num, pages, vring_align); vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.num_free = num; vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. 
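 *
 * (Editor's sketch of how a transport might call this constructor; the
 * allocation via vring_size() and the my_notify()/my_callback() helpers are
 * illustrative assumptions, not taken from this file:
 *
 *	void *pages = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
 *						   true, pages, my_notify,
 *						   my_callback, "requests");
 *
 * Both helpers use the void (*fn)(struct virtqueue *vq) signature expected
 * by the parameters below.)
 *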
*/ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; vq->data[i] = NULL; } vq->data[i] = NULL; return &vq->vq; } EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); /* Manipulates transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->broken; } EXPORT_SYMBOL_GPL(virtqueue_is_broken); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.13/000077500000000000000000000000001314037446600265375ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.13/virtio.c000066400000000000000000000156031314037446600302240ustar00rootroot00000000000000#include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) /* Unique numbering for virtio devices. */ static DEFINE_IDA(virtio_index_ida); #endif static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) len += sprintf(buf+len, "%c", test_bit(i, dev->features) ? 
'1' : '0'); len += sprintf(buf+len, "\n"); return len; } static struct device_attribute virtio_dev_attrs[] = { __ATTR_RO(device), __ATTR_RO(vendor), __ATTR_RO(status), __ATTR_RO(modalias), __ATTR_RO(features), __ATTR_NULL }; static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) { if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) return 0; return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; } /* This looks through all the IDs a driver claims to support. If any of them * match, we return 1 and the kernel will call virtio_dev_probe(). */ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; return 0; } static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); } static void add_status(struct virtio_device *dev, unsigned status) { dev->config->set_status(dev, dev->config->get_status(dev) | status); } void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); /* Features supported by both device and driver into dev->features. */ memset(dev->features, 0, sizeof(dev->features)); for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; BUG_ON(f >= 32); if (device_features & (1 << f)) set_bit(f, dev->features); } /* Transport features always preserved to pass to finalize_features. */ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) if (device_features & (1 << i)) set_bit(i, dev->features); dev->config->finalize_features(dev); err = drv->probe(dev); if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); else { add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); if (drv->scan) drv->scan(dev); } return err; } static int virtio_dev_remove(struct device *_d) { struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); /* Driver should have reset device. */ WARN_ON_ONCE(dev->config->get_status(dev)); /* Acknowledge the device's existence again. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); return 0; } static struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, .dev_attrs = virtio_dev_attrs, .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, }; int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. 
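 *
 * Example (editor's sketch of a minimal caller, mirroring the
 * virtio_net_driver definition later in this package; my_probe(),
 * my_remove() and the table names are hypothetical):
 *
 *	static struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *	static unsigned int my_features[] = { VIRTIO_NET_F_MAC };
 *	static struct virtio_driver my_driver = {
 *		.feature_table		= my_features,
 *		.feature_table_size	= ARRAY_SIZE(my_features),
 *		.driver.name		= "my-virtio-driver",
 *		.driver.owner		= THIS_MODULE,
 *		.id_table		= my_id_table,
 *		.probe			= my_probe,
 *		.remove			= my_remove,
 *	};
 *
 * and, from the module init function, register_virtio_driver(&my_driver).
 *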
*/ BUG_ON(driver->feature_table_size && !driver->feature_table); driver->driver.bus = &virtio_bus; return driver_register(&driver->driver); } EXPORT_SYMBOL_GPL(register_virtio_driver); void unregister_virtio_driver(struct virtio_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(unregister_virtio_driver); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,14) /* Unique numbering for virtio devices. */ static unsigned int dev_index; #endif int register_virtio_device(struct virtio_device *dev) { int err; dev->dev.bus = &virtio_bus; /* Assign a unique device index and hence name. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); if (err < 0) goto out; dev->index = err; #else dev->index = dev_index++; #endif dev_set_name(&dev->dev, "virtio%u", dev->index); /* We always start by resetting the device, in case a previous * driver messed it up. This also tests that code path a little. */ dev->config->reset(dev); /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); /* device_register() causes the bus infrastructure to look for a * matching driver. */ err = device_register(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) out: #endif if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } EXPORT_SYMBOL_GPL(register_virtio_device); void unregister_virtio_device(struct virtio_device *dev) { #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) int index = dev->index; /* save for after device release */ #endif device_unregister(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) ida_simple_remove(&virtio_index_ida, index); #endif } EXPORT_SYMBOL_GPL(unregister_virtio_device); static int virtio_init(void) { if (bus_register(&virtio_bus) != 0) panic("virtio bus registration failed"); return 0; } static void __exit virtio_exit(void) { bus_unregister(&virtio_bus); } core_initcall(virtio_init); module_exit(virtio_exit); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.13/virtio_net.c000066400000000000000000001460641314037446600311000ustar00rootroot00000000000000/* A network driver using virtio. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* device supports hardware address * change when it's running */ static unsigned int iff_live_addr_change = 1; static int napi_weight = 64; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. 
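 *
 * (Editor's note: with the usual Ethernet sizes, the MAX_PACKET_LEN defined
 * just below works out to ETH_HLEN (14) + VLAN_HLEN (4) + ETH_DATA_LEN (1500)
 * = 1518 bytes, the largest frame the small-buffer receive path allocates
 * room for.)
 *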
*/ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 #endif #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; u64 tx_bytes; u64 tx_packets; u64 rx_bytes; u64 rx_packets; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[40]; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* Chain pages by the private ptr. */ struct page *pages; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of this receive queue: input.$index */ char name[40]; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Has control virtqueue */ bool has_cvq; /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; /* enable config space updates */ bool config_enable; /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Work struct for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* Does the affinity hint is set for virtqueues? */ bool affinity_hint_set; /* Per-cpu variable to show the mapping from CPU to virtqueue */ int __percpu *vq_index; /* CPU hot plug notifier */ struct notifier_block nb; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. */ char padding[6]; }; /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. 
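 *
 * (Editor's sketch: the spare-page pool is a singly linked list threaded
 * through page->private and terminated by 0.  Walking it, purely for
 * illustration, looks like:
 *
 *	struct page *p;
 *	unsigned int spare = 0;
 *
 *	for (p = rq->pages; p; p = (struct page *)p->private)
 *		spare++;
 *
 * give_pages() below splices a whole chain onto the head of this list.)
 *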
*/ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); /* We were probably waiting for more output buffers. */ netif_wake_subqueue(vi->dev, vq2txq(vq)); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int size = min((unsigned)PAGE_SIZE - offset, *len); int i = skb_shinfo(skb)->nr_frags; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,13)) skb_fill_page_desc(skb, i, page, offset, size); #else __skb_fill_page_desc(skb, i, page, offset, size); skb_shinfo(skb)->nr_frags++; #endif skb->data_len += size; skb->len += size; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; #endif *len -= size; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct receive_queue *rq, struct page *page, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. 
*/ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); return skb; } static struct sk_buff *receive_small(void *buf, unsigned int len) { struct sk_buff * skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); return skb; } static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf) { struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, 0); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; } static struct sk_buff *receive_mergeable(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len) { struct skb_vnet_hdr *hdr = page_address(buf); int num_buf = hdr->mhdr.num_buffers; struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, len); int i; if (unlikely(!skb)) goto err_skb; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; goto err_frags; } page = virtqueue_get_buf(rq->vq, &len); if (!page) { pr_debug("%s: rx error: %d buffers %d missing\n", dev->name, hdr->mhdr.num_buffers, num_buf); dev->stats.rx_length_errors++; goto err_buf; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --rq->num; } return skb; err_skb: give_pages(rq, page); while (--num_buf) { err_frags: buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } page = buf; give_pages(rq, page); --rq->num; } err_buf: dev->stats.rx_dropped++; dev_kfree_skb(skb); return NULL; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(rq, buf); else dev_kfree_skb(buf); return; } if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, rq, buf, len); else if (vi->big_packets) skb = receive_big(dev, rq, buf); else skb = receive_small(buf, len); if (unlikely(!skb)) return; hdr = skb_vnet_hdr(skb); skb->truesize += skb->data_len; u64_stats_update_begin(&stats->rx_syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 
break; default: net_warn_ratelimited("%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { net_warn_ratelimited("%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; #if (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)) skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); #else skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); #endif if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) { struct page *page; int err; page = get_a_page(rq, gfp); if (!page) return -ENOMEM; sg_init_one(rq->sg, page_address(page), PAGE_SIZE); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(rq, gfp); else err = add_recvbuf_small(rq, gfp); oom = err == -ENOMEM; if (err) break; ++rq->num; } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; virtqueue_kick(rq->vq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; /* Schedule NAPI, Suppress further interrupts if successful. 
*/ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&rq->napi); } } static void virtnet_napi_enable(struct receive_queue *rq) { napi_enable(&rq->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rq->vq); local_bh_disable(); __napi_schedule(&rq->napi); local_bh_enable(); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(rq, GFP_KERNEL); virtnet_napi_enable(rq); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; void *buf; unsigned int r, len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(rq, buf, len); --rq->num; received++; } if (rq->num < rq->max / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); napi_complete(napi); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); __napi_schedule(napi); goto again; } } return received; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } return 0; } static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); u64_stats_update_begin(&stats->tx_syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->tx_syncp); dev_kfree_skb_any(skb); } } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; unsigned hdr_len; bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (vi->mergeable_rx_bufs) hdr_len = sizeof hdr->mhdr; else hdr_len = sizeof hdr->hdr; can_push = vi->any_header_sg && !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; /* Even if we can, don't push here yet as this would skew * csum_start offset below. 
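 *
 * (Editor's note on the two scatterlist layouts built further down:
 * when can_push is true, the virtio_net_hdr is pushed into the skb
 * headroom so header and frame share one contiguous area described by
 * skb_to_sgvec(); otherwise sq->sg[0] points at the header kept in
 * skb->cb and sq->sg[1..] describe the frame data.)
 *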
*/ if (can_push) hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); else hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } return NETDEV_TX_OK; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out, struct scatterlist *in) { struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; /* Add header */ sg_init_one(&hdr, &ctrl, sizeof(ctrl)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; if (in) sgs[out_num + in_num++] = in; /* Add return status. 
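 *
 * Example caller (editor's sketch, matching virtnet_set_rx_mode() later in
 * this file):
 *
 *	u8 promisc = 1;
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, &promisc, sizeof(promisc));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 *				  VIRTIO_NET_CTRL_RX_PROMISC, &sg, NULL))
 *		dev_warn(&dev->dev, "Failed to enable promisc mode.\n");
 *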
*/ sg_init_one(&stat, &status, sizeof(status)); sgs[out_num + in_num++] = &stat; BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0); virtqueue_kick(vi->cvq); /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && !virtqueue_is_broken(vi->cvq)) cpu_relax(); return status == VIRTIO_NET_OK; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ static int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!iff_live_addr_change && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ static void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } #endif static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr = p; struct scatterlist sg; ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), addr->sa_data, dev->addr_len); } eth_commit_mac_addr_change(dev, p); return 0; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) static struct net_device_stats *virtnet_stats(struct net_device *dev) { int cpu; unsigned int start; struct virtnet_info *vi = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; stats->rx_packets = 0; stats->tx_packets = 0; stats->rx_bytes = 0; stats->tx_bytes = 0; for_each_possible_cpu(cpu) { struct virtnet_stats *stats_tmp = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats_tmp->tx_syncp); tpackets = stats_tmp->tx_packets; tbytes = stats_tmp->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats_tmp->rx_syncp); rpackets = stats_tmp->rx_packets; rbytes = stats_tmp->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->rx_syncp, start)); stats->rx_packets += rpackets; stats->tx_packets += tpackets; stats->rx_bytes += rbytes; stats->tx_bytes += tbytes; } return stats; } #else static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; unsigned int start; for_each_possible_cpu(cpu) { struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 
tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; return tot; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->curr_queue_pairs; i++) napi_schedule(&vi->rq[i].napi); } #endif static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; s.virtqueue_pairs = queue_pairs; sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); return 0; } #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) static int virtnet_set_tx_csum(struct net_device *dev, u32 data) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) return -ENOSYS; return ethtool_op_set_tx_hw_csum(dev, data); } #endif static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) struct dev_addr_list *addr; #endif struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, addr = addr->next) memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); #else i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); #endif sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) { int i; int cpu; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, -1); virtqueue_set_affinity(vi->sq[i].vq, -1); } vi->affinity_hint_set = false; } i = 0; for_each_online_cpu(cpu) { if (cpu == hcpu) { *per_cpu_ptr(vi->vq_index, cpu) = -1; } else { *per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs; } } } static void virtnet_set_affinity(struct virtnet_info *vi) { int i; int cpu; /* In multiqueue mode, when the number of cpu is equal to the number of * queue pairs, we let the queue pairs to be private to one cpu by * setting the affinity hint to eliminate the contention. 
*/ if (vi->curr_queue_pairs == 1 || vi->max_queue_pairs != num_online_cpus()) { virtnet_clean_affinity(vi, -1); return; } i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); *per_cpu_ptr(vi->vq_index, cpu) = i; i++; } vi->affinity_hint_set = true; } static int virtnet_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); switch(action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_DEAD: virtnet_set_affinity(vi); break; case CPU_DOWN_PREPARE: virtnet_clean_affinity(vi, (long)hcpu); break; default: break; } return NOTIFY_OK; } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)) /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. */ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs) return -EINVAL; get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); return err; } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } #endif static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, .set_ufo = ethtool_op_set_ufo, #else .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, #endif }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } #if 0 /* To avoid contending a lock hold by a vcpu who would exit to host, select the * txq based on the processor id. 
*/ static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) { int txq; struct virtnet_info *vi = netdev_priv(dev); if (skb_rx_queue_recorded(skb)) { txq = skb_get_rx_queue(skb); } else { txq = *this_cpu_ptr(vi->vq_index); if (txq == -1) txq = 0; } while (unlikely(txq >= dev->real_num_tx_queues)) txq -= dev->real_num_tx_queues; return txq; } #endif static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) .ndo_get_stats = virtnet_stats, #else .ndo_get_stats64 = virtnet_stats, #endif .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #if 0 .ndo_select_queue = virtnet_select_queue, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) /* from higher version kernel code. For redhat*/ static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) { struct in_ifaddr *ifa; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { arp_send(ARPOP_REQUEST, ETH_P_ARP, ifa->ifa_local, dev, ifa->ifa_local, NULL, dev->dev_addr, NULL); } } /** * netif_notify_peers - notify network peers about existence of @dev * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when * a device wants to inform the rest of the network about some sort of * reconfiguration such as a failover event or virtual machine * migration. * For redhat. 
*/ void netif_notify_peers(struct net_device *dev) { struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(dev); if(in_dev) inetdev_send_gratuitous_arp(dev, in_dev); rtnl_unlock(); } #endif static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; mutex_lock(&vi->config_lock); if (!vi->config_enable) goto done; if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) netif_notify_peers(vi->dev); #else netdev_notify_peers(vi->dev); #endif virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) goto done; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } done: mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) netif_napi_del(&vi->rq[i].napi); kfree(vi->rq); kfree(vi->sq); } static void free_receive_bufs(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); } } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) dev_kfree_skb(buf); } for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; } BUG_ON(vi->rq[i].num != 0); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi, -1); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. 
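 *
 * (Editor's worked example: with max_queue_pairs = 2 and a control
 * virtqueue, total_vqs = 2 * 2 + 1 = 5, laid out as rx0, tx0, rx1, tx1,
 * ctrl, so rxq2vq(1) == 2 and txq2vq(1) == 3.)
 *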
*/ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) vi->dev->features |= NETIF_F_HW_VLAN_FILTER; #else vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; #endif } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); return 0; err_find: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } return 0; err_rq: kfree(vi->sq); err_sq: return -ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; get_online_cpus(); virtnet_set_affinity(vi); put_online_cpus(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } static int virtnet_probe(struct virtio_device *vdev) { int i, err; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; /* Find if host supports multiqueue virtio_net device */ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, offsetof(struct virtio_net_config, max_virtqueue_pairs), &max_queue_pairs); /* We need at least 2 queue's */ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. 
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) dev->priv_flags |= IFF_UNICAST_FLT; #endif dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->features |= NETIF_F_TSO_ECN; dev->features |= NETIF_F_GSO_ROBUST; #else dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; #endif /* (!csum && gso) case will be fixed by register_netdev() */ } dev->vlan_features = dev->features; /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len) < 0) eth_hw_addr_random(dev); /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; vi->vq_index = alloc_percpu(int); if (vi->vq_index == NULL) goto free_stats; mutex_init(&vi->config_lock); vi->config_enable = true; INIT_WORK(&vi->config_work, virtnet_config_changed_work); /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) { vi->any_header_sg = true; if (vi->mergeable_rx_bufs) dev->needed_headroom = sizeof (struct virtio_net_hdr_mrg_rxbuf); else dev->needed_headroom = sizeof (struct virtio_net_hdr); } /* Use single tx/rx queue pair as default */ vi->curr_queue_pairs = 1; vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free_index; get_online_cpus(); err = virtnet_set_queues(vi, max_queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, max_queue_pairs); netif_set_real_num_rx_queues(dev, max_queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->rq[i].num == 0) { free_unused_bufs(vi); err = -ENOMEM; goto free_recv_bufs; } } vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_recv_bufs; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_recv_bufs: free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); virtnet_del_vqs(vi); free_index: free_percpu(vi->vq_index); free_stats: free_percpu(vi->stats); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); /* Free unused buffers in both send and recv, if any. */ free_unused_bufs(vi); free_receive_bufs(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device. 
*/ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); unregister_netdev(vi->dev); remove_vq_common(vi); flush_work(&vi->config_work); free_percpu(vi->vq_index); free_percpu(vi->stats); free_netdev(vi->dev); } #ifdef CONFIG_PM static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int i; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); netif_device_detach(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); } remove_vq_common(vi); flush_work(&vi->config_work); return 0; } static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err, i; err = init_vqs(vi); if (err) return err; if (netif_running(vi->dev)) { for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); } netif_device_attach(vi->dev); mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); err = register_hotcpu_notifier(&vi->nb); if (err) return err; return 0; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; module_virtio_driver(virtio_net_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.13/virtio_pci.c000066400000000000000000000564271314037446600310700ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Anthony Liguori "); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1"); /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; /* the IO mapping for the PCI config space */ void __iomem *ioaddr; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; /* MSI-X support */ int msix_enabled; int intx_enabled; struct msix_entry *msix_entries; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; /* Status saved during hibernate/restore */ u8 saved_status; /* Whether we have vector per vq */ bool per_vq_vectors; }; /* Constants for MSI-X */ /* Use first vector for configuration changes, second and the rest for * virtqueues Thus, we need at least 2 vectors for MSI. */ enum { VP_MSIX_CONFIG_VECTOR = 0, VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ int num; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; /* MSI-X vector (or none) */ unsigned msix_vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_pci_device, vdev); } /* virtio config->get_features() implementation */ static u32 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* When someone needs more than 32 feature bits, we'll need to * steal a bit to indicate that the rest are somewhere else. */ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); } /* virtio config->finalize_features() implementation */ static void vp_finalize_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* We only support 32 feature bits. */ BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); } /* virtio config->get() implementation */ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = ioread8(ioaddr + i); } /* the config->set() implementation. 
it's symmetric to the config->get() * implementation */ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; for (i = 0; i < len; i++) iowrite8(ptr[i], ioaddr + i); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } /* wait for pending irq handlers */ static void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) synchronize_irq(vp_dev->pci_dev->irq); for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(vp_dev->msix_entries[i].vector); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush out the status write, and flush in device writes, * including MSi-X interrupts, if any. */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); } /* the notify function used when creating a virt queue */ static void vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. */ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_driver *drv; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vp_dev->vdev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
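 * The VIRTIO_PCI_ISR_CONFIG bit flags a configuration-space change; either
 * way the virtqueues are still scanned below, since reading the ISR already
 * cleared whatever was pending.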
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static void vp_free_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); for (i = 0; i < vp_dev->msix_vectors; i++) if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; vp_dev->msix_vectors = 0; } vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_entries); vp_dev->msix_entries = NULL; kfree(vp_dev->msix_affinity_masks); vp_dev->msix_affinity_masks = NULL; } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) goto error; vp_dev->msix_affinity_masks = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, GFP_KERNEL); if (!vp_dev->msix_affinity_masks) goto error; for (i = 0; i < nvectors; ++i) if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], GFP_KERNEL)) goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; /* pci_enable_msix returns positive if we can't get this many. 
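 * A positive return is the number of vectors that could have been granted;
 * rather than retrying with fewer, any shortfall is turned into -ENOSPC so
 * the caller can fall back to its next interrupt strategy.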
*/ err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); if (err > 0) err = -ENOSPC; if (err) goto error; vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Verify we had enough resources to assign the vector */ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: vp_free_vectors(vdev); return err; } static int vp_request_intx(struct virtio_device *vdev) { int err; struct virtio_pci_device *vp_dev = to_vp_device(vdev); err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (!err) vp_dev->intx_enabled = 1; return err; } static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; u16 num; int err; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); /* Check if queue is either not available or already active. 
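 * A queue size of zero means the device does not implement this index, and
 * a non-zero PFN means the queue page was already handed to the host.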
*/ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); /* allocate and fill out our structure the represents an active * queue */ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); info->num = num; info->msix_vector = msix_vec; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); if (info->queue == NULL) { err = -ENOMEM; goto out_info; } /* activate the queue */ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, true, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; } vq->priv = info; info->vq = vq; if (msix_vec != VIRTIO_MSI_NO_VECTOR) { iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto out_assign; } } if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); } else { INIT_LIST_HEAD(&info->node); } return vq; out_assign: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); out_info: kfree(info); return ERR_PTR(err); } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long flags, size; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); } vring_del_virtqueue(vq); /* Select and deactivate the queue */ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vq->priv; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, vq); vp_del_vq(vq); } vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; int i, err, nvectors, allocated_vectors; if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) goto error_request; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) if (callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. 
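 * (Vector 0 carries config-change interrupts and vector 1 is shared by all
 * virtqueues, matching VP_MSIX_CONFIG_VECTOR/VP_MSIX_VQ_VECTOR above.)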
*/ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; } vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } else if (!callbacks[i] || !vp_dev->msix_enabled) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vp_dev->per_vq_vectors) msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof *vp_dev->msix_names, "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(vp_dev->msix_entries[msix_vec].vector, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); if (err) { vp_del_vq(vqs[i]); goto error_find; } } return 0; error_find: vp_del_vqs(vdev); error_request: return err; } /* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { int err; /* Try MSI-X with one vector per queue. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false); if (!err) return 0; /* Finally fall back to regular interrupts. */ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false); } static const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return pci_name(vp_dev->pci_dev); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) /* Setup the affinity for a virtqueue: * - force the affinity for per vq vector * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = vq->priv; struct cpumask *mask; unsigned int irq; if (!vq->callback) return -EINVAL; if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = vp_dev->msix_entries[info->msix_vector].vector; if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } } return 0; } #endif static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, .find_vqs = vp_find_vqs, .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) .set_vq_affinity = vp_set_vq_affinity, #endif }; static void virtio_pci_release_dev(struct device *_d) { /* * No need for a release method as we allocate/free * all devices together with the pci devices. * Provide an empty one to avoid getting a warning from core. */ } #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) /** * pci_msi_off - disables any msi or msix capabilities * @dev: the PCI device to operate on * * If you want to use msi see pci_enable_msi and friends. * This is a lower level primitive that allows us to disable * msi operation at the device level. 
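 * (Backported here for kernels older than 3.0: it clears the enable bit in
 * both the MSI and MSI-X capability control words via config space.)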
*/ void pci_msi_off(struct pci_dev *dev) { int pos; u16 control; pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); } pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); } } #endif /* the PCI probing function */ static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) return -ENODEV; if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", VIRTIO_PCI_ABI_VERSION, pci_dev->revision); return -ENODEV; } /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (vp_dev == NULL) return -ENOMEM; vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); /* Disable MSI/MSIX to bring device to a known good state. */ pci_msi_off(pci_dev); /* enable the device */ err = pci_enable_device(pci_dev); if (err) goto out; err = pci_request_regions(pci_dev, "virtio-pci"); if (err) goto out_enable_device; vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); if (vp_dev->ioaddr == NULL) { err = -ENOMEM; goto out_req_regions; } pci_set_drvdata(pci_dev, vp_dev); pci_set_master(pci_dev); /* we use the subsystem vendor/device id as the virtio vendor/device * id. 
this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by * the subsystem ids */ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) goto out_set_drvdata; return 0; out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); out_enable_device: pci_disable_device(pci_dev); out: kfree(vp_dev); return err; } static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); vp_del_vqs(&vp_dev->vdev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } #ifdef CONFIG_PM static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = 0; vp_dev->saved_status = vp_get_status(&vp_dev->vdev); if (drv && drv->freeze) ret = drv->freeze(&vp_dev->vdev); if (!ret) pci_disable_device(pci_dev); return ret; } static int virtio_pci_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = pci_enable_device(pci_dev); if (ret) return ret; pci_set_master(pci_dev); vp_finalize_features(&vp_dev->vdev); if (drv && drv->restore) ret = drv->restore(&vp_dev->vdev); /* Finally, tell the device we're all set */ if (!ret) vp_set_status(&vp_dev->vdev, vp_dev->saved_status); return ret; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) static const struct dev_pm_ops virtio_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) }; #else static const struct dev_pm_ops virtio_pci_pm_ops = { .suspend = virtio_pci_freeze, .resume = virtio_pci_restore, .freeze = virtio_pci_freeze, .thaw = virtio_pci_restore, .restore = virtio_pci_restore, .poweroff = virtio_pci_freeze, }; #endif #endif static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = virtio_pci_remove, #ifdef CONFIG_PM .driver.pm = &virtio_pci_pm_ops, #endif }; static int __init virtio_pci_init(void) { return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); } module_exit(virtio_pci_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.13/virtio_ring.c000066400000000000000000000570751314037446600312540ustar00rootroot00000000000000/* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #endif struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ struct vring vring; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. */ u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif /* Tokens for callbacks. */ void *data[]; }; #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, unsigned int *count) { return sg_next(sg); } static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, unsigned int *count) { if (--(*count) == 0) return NULL; return sg + 1; } /* Set up an indirect table of descriptors and add it to the queue. */ static inline int vring_add_indirect(struct vring_virtqueue *vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_sg, unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, gfp_t gfp) { struct vring_desc *desc; unsigned head; struct scatterlist *sg; int i, n; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; /* Transfer entries from the sg lists into the indirect page */ i = 0; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { desc[i].flags = VRING_DESC_F_NEXT; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } BUG_ON(i != total_sg); /* Last one doesn't continue. 
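 * Clearing VRING_DESC_F_NEXT terminates the indirect chain; the whole table
 * is then published through a single VRING_DESC_F_INDIRECT descriptor below.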
*/ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ vq->free_head = vq->vring.desc[head].next; return head; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; unsigned int i, n, avail, uninitialized_var(prev), total_sg; int head; START_USE(vq); BUG_ON(data == NULL); #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif total_sg = total_in + total_out; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 2 && vq->vq.num_free) { head = vring_add_indirect(vq, sgs, next, total_sg, total_out, total_in, out_sgs, in_sgs, gfp); if (likely(head >= 0)) goto add_head; } BUG_ON(total_sg > vq->vring.num); BUG_ON(total_sg == 0); if (vq->vq.num_free < total_sg) { pr_debug("Can't add buf len %i - avail = %i\n", total_sg, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ vq->vq.num_free -= total_sg; head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ vq->free_head = i; add_head: /* Set token. */ vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->vring.avail->idx++; vq->num_added++; /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; } /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. * @sg: the description of the buffer(s). * @out_num: the number of sg readable by other side * @in_num: the number of sg which are writable (after readable ones) * @data: the token identifying the buffer. 
* @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *data, gfp_t gfp) { struct scatterlist *sgs[2]; sgs[0] = sg; sgs[1] = sg + out; return virtqueue_add(_vq, sgs, sg_next_arr, out, in, out ? 1 : 0, in ? 1 : 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_buf); /** * virtqueue_add_sgs - expose buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_num: the number of scatterlists readable by other side * @in_num: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_out, total_in; /* Count them first. */ for (i = total_out = total_in = 0; i < out_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_out++; } for (; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_in++; } return virtqueue_add(_vq, sgs, sg_next_chained, total_out, total_in, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. 
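 * (This split lets a caller do the prepare step under its own queue lock
 * and issue the doorbell write only after dropping it.)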
*/ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(vring_avail_event(&vq->vring), new, old); } else { needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); } END_USE(vq); return needs_kick; } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @vq: the struct virtqueue * * This does not need to be serialized. */ void virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Prod other side to tell it about changes. */ vq->notify(_vq); } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_buf calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ void virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i; /* Clear data ptr. */ vq->data[head] = NULL; /* Put back on free list: find end */ i = head; /* Free the indirect table */ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) kfree(phys_to_virt(vq->vring.desc[i].addr)); while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) { return vq->last_used_idx != vq->vring.used->idx; } /** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the driver wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_buf(). */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->data[i])) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. 
*/ ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq->weak_barriers); } #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; END_USE(vq); return last_used_idx; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); virtio_mb(vq->weak_barriers); return (u16)last_used_idx != vq->vring.used->idx; } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. 
* * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; virtio_mb(vq->weak_barriers); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_buf(). * This is not valid on an active queue; it is useful only for device * shutdown. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->data[i]) continue; /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); vq->vring.avail->idx--; END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) return IRQ_HANDLED; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct vring_virtqueue *vq; unsigned int i; /* We assume num is a power of 2. */ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); if (!vq) return NULL; vring_init(&vq->vring, num, pages, vring_align); vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.num_free = num; vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. 
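 * Setting VRING_AVAIL_F_NO_INTERRUPT asks the host to skip used-buffer
 * interrupts for a queue that has no callback to service them.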
*/ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; vq->data[i] = NULL; } vq->data[i] = NULL; return &vq->vq; } EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); /* Manipulates transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->broken; } EXPORT_SYMBOL_GPL(virtqueue_is_broken); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/000077500000000000000000000000001314037446600265505ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/config.c000066400000000000000000000004541314037446600301640ustar00rootroot00000000000000/* Configuration space parsing helpers for virtio. * * The configuration is [type][len][... len bytes ...] fields. * * Copyright 2007 Rusty Russell, IBM Corporation. * GPL v2 or later. */ #include #include #include #include UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/virtio.c000066400000000000000000000156031314037446600302350ustar00rootroot00000000000000#include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) /* Unique numbering for virtio devices. */ static DEFINE_IDA(virtio_index_ida); #endif static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) len += sprintf(buf+len, "%c", test_bit(i, dev->features) ? 
'1' : '0'); len += sprintf(buf+len, "\n"); return len; } static struct device_attribute virtio_dev_attrs[] = { __ATTR_RO(device), __ATTR_RO(vendor), __ATTR_RO(status), __ATTR_RO(modalias), __ATTR_RO(features), __ATTR_NULL }; static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) { if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) return 0; return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; } /* This looks through all the IDs a driver claims to support. If any of them * match, we return 1 and the kernel will call virtio_dev_probe(). */ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; return 0; } static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); } static void add_status(struct virtio_device *dev, unsigned status) { dev->config->set_status(dev, dev->config->get_status(dev) | status); } void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); /* Features supported by both device and driver into dev->features. */ memset(dev->features, 0, sizeof(dev->features)); for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; BUG_ON(f >= 32); if (device_features & (1 << f)) set_bit(f, dev->features); } /* Transport features always preserved to pass to finalize_features. */ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) if (device_features & (1 << i)) set_bit(i, dev->features); dev->config->finalize_features(dev); err = drv->probe(dev); if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); else { add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); if (drv->scan) drv->scan(dev); } return err; } static int virtio_dev_remove(struct device *_d) { struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); /* Driver should have reset device. */ WARN_ON_ONCE(dev->config->get_status(dev)); /* Acknowledge the device's existence again. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); return 0; } static struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, .dev_attrs = virtio_dev_attrs, .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, }; int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. 
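 * A driver that declares a feature_table_size but supplies no table would
 * only blow up later in virtio_dev_probe(), so fail loudly at registration.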
*/ BUG_ON(driver->feature_table_size && !driver->feature_table); driver->driver.bus = &virtio_bus; return driver_register(&driver->driver); } EXPORT_SYMBOL_GPL(register_virtio_driver); void unregister_virtio_driver(struct virtio_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(unregister_virtio_driver); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,14) /* Unique numbering for virtio devices. */ static unsigned int dev_index; #endif int register_virtio_device(struct virtio_device *dev) { int err; dev->dev.bus = &virtio_bus; /* Assign a unique device index and hence name. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); if (err < 0) goto out; dev->index = err; #else dev->index = dev_index++; #endif dev_set_name(&dev->dev, "virtio%u", dev->index); /* We always start by resetting the device, in case a previous * driver messed it up. This also tests that code path a little. */ dev->config->reset(dev); /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); /* device_register() causes the bus infrastructure to look for a * matching driver. */ err = device_register(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) out: #endif if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } EXPORT_SYMBOL_GPL(register_virtio_device); void unregister_virtio_device(struct virtio_device *dev) { #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) int index = dev->index; /* save for after device release */ #endif device_unregister(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) ida_simple_remove(&virtio_index_ida, index); #endif } EXPORT_SYMBOL_GPL(unregister_virtio_device); static int virtio_init(void) { if (bus_register(&virtio_bus) != 0) panic("virtio bus registration failed"); return 0; } static void __exit virtio_exit(void) { bus_unregister(&virtio_bus); } core_initcall(virtio_init); module_exit(virtio_exit); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/virtio_net.c000066400000000000000000001447701314037446600311130ustar00rootroot00000000000000/* A network driver using virtio. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* device supports hardware address * change when it's running */ static unsigned int iff_live_addr_change = 1; static int napi_weight = 64; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. 
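 * Until then, MAX_PACKET_LEN below assumes a standard 1500-byte payload
 * (ETH_DATA_LEN) plus the Ethernet and VLAN headers.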
*/ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 #endif #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; u64 tx_bytes; u64 tx_packets; u64 rx_bytes; u64 rx_packets; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[40]; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* Chain pages by the private ptr. */ struct page *pages; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of this receive queue: input.$index */ char name[40]; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Has control virtqueue */ bool has_cvq; /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; /* enable config space updates */ bool config_enable; /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Work struct for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* Does the affinity hint is set for virtqueues? */ bool affinity_hint_set; /* Per-cpu variable to show the mapping from CPU to virtqueue */ int __percpu *vq_index; /* CPU hot plug notifier */ struct notifier_block nb; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. */ char padding[6]; }; /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. 
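 * (Editor's note: pages are chained through page->private, so the walk below
 * stops at the first page whose private field is zero; the existing
 * rq->pages list is hung off that tail and the new chain becomes the head.)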
*/ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); /* We were probably waiting for more output buffers. */ netif_wake_subqueue(vi->dev, vq2txq(vq)); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int size = min((unsigned)PAGE_SIZE - offset, *len); int i = skb_shinfo(skb)->nr_frags; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,13)) skb_fill_page_desc(skb, i, page, offset, size); #else __skb_fill_page_desc(skb, i, page, offset, size); skb_shinfo(skb)->nr_frags++; #endif skb->data_len += size; skb->len += size; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; #endif *len -= size; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct receive_queue *rq, struct page *page, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. 
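 * (Editor's note: MAX_SKB_FRAGS pages is the most the skb frag array can
 * describe, so anything larger is dropped here instead of overflowing
 * skb_shinfo().)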
*/ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); return skb; } static struct sk_buff *receive_small(void *buf, unsigned int len) { struct sk_buff * skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); return skb; } static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf) { struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, 0); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; } static struct sk_buff *receive_mergeable(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len) { struct skb_vnet_hdr *hdr = page_address(buf); int num_buf = hdr->mhdr.num_buffers; struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, len); int i; if (unlikely(!skb)) goto err_skb; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; goto err_frags; } page = virtqueue_get_buf(rq->vq, &len); if (!page) { pr_debug("%s: rx error: %d buffers %d missing\n", dev->name, hdr->mhdr.num_buffers, num_buf); dev->stats.rx_length_errors++; goto err_buf; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --rq->num; } return skb; err_skb: give_pages(rq, page); while (--num_buf) { err_frags: buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } page = buf; give_pages(rq, page); --rq->num; } err_buf: dev->stats.rx_dropped++; dev_kfree_skb(skb); return NULL; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(rq, buf); else dev_kfree_skb(buf); return; } if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, rq, buf, len); else if (vi->big_packets) skb = receive_big(dev, rq, buf); else skb = receive_small(buf, len); if (unlikely(!skb)) return; hdr = skb_vnet_hdr(skb); skb->truesize += skb->data_len; u64_stats_update_begin(&stats->rx_syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 
break; default: net_warn_ratelimited("%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { net_warn_ratelimited("%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; #if (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)) skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); #else skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); #endif if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) { struct page *page; int err; page = get_a_page(rq, gfp); if (!page) return -ENOMEM; sg_init_one(rq->sg, page_address(page), PAGE_SIZE); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(rq, gfp); else err = add_recvbuf_small(rq, gfp); oom = err == -ENOMEM; if (err) break; ++rq->num; } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; virtqueue_kick(rq->vq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; /* Schedule NAPI, Suppress further interrupts if successful. 
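 * (Editor's note: napi_schedule_prep() returns false if polling is already
 * scheduled; in that case the callback is left enabled and this interrupt
 * is simply ignored.)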
*/ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&rq->napi); } } static void virtnet_napi_enable(struct receive_queue *rq) { napi_enable(&rq->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rq->vq); local_bh_disable(); __napi_schedule(&rq->napi); local_bh_enable(); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(rq, GFP_KERNEL); virtnet_napi_enable(rq); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; void *buf; unsigned int r, len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(rq, buf, len); --rq->num; received++; } if (rq->num < rq->max / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); napi_complete(napi); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); __napi_schedule(napi); goto again; } } return received; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } return 0; } static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); u64_stats_update_begin(&stats->tx_syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->tx_syncp); dev_kfree_skb_any(skb); } } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; unsigned hdr_len; bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (vi->mergeable_rx_bufs) hdr_len = sizeof hdr->mhdr; else hdr_len = sizeof hdr->hdr; can_push = vi->any_header_sg && !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; /* Even if we can, don't push here yet as this would skew * csum_start offset below. 
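 * (Editor's note: the header is only prepended with __skb_push() further
 * down, after the checksum fields have been read from the unmodified skb.)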
*/ if (can_push) hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); else hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } return NETDEV_TX_OK; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out, struct scatterlist *in) { struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; /* Add header */ sg_init_one(&hdr, &ctrl, sizeof(ctrl)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; if (in) sgs[out_num + in_num++] = in; /* Add return status. 
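 * (Editor's note: status is pre-set to ~0 and must be overwritten by the
 * device with VIRTIO_NET_OK; a device that never writes the ack byte is
 * treated as a failure.)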
*/ sg_init_one(&stat, &status, sizeof(status)); sgs[out_num + in_num++] = &stat; BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0); virtqueue_kick(vi->cvq); /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && !virtqueue_is_broken(vi->cvq)) cpu_relax(); return status == VIRTIO_NET_OK; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ static int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!iff_live_addr_change && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ static void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } #endif static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr = p; struct scatterlist sg; ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), addr->sa_data, dev->addr_len); } eth_commit_mac_addr_change(dev, p); return 0; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) static struct net_device_stats *virtnet_stats(struct net_device *dev) { int cpu; unsigned int start; struct virtnet_info *vi = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; stats->rx_packets = 0; stats->tx_packets = 0; stats->rx_bytes = 0; stats->tx_bytes = 0; for_each_possible_cpu(cpu) { struct virtnet_stats *stats_tmp = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats_tmp->tx_syncp); tpackets = stats_tmp->tx_packets; tbytes = stats_tmp->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats_tmp->rx_syncp); rpackets = stats_tmp->rx_packets; rbytes = stats_tmp->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->rx_syncp, start)); stats->rx_packets += rpackets; stats->tx_packets += tpackets; stats->rx_bytes += rbytes; stats->tx_bytes += tbytes; } return stats; } #else static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; unsigned int start; for_each_possible_cpu(cpu) { struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 
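/* Fold this CPU's consistent snapshot into the running totals. */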
tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; return tot; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->curr_queue_pairs; i++) napi_schedule(&vi->rq[i].napi); } #endif static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; s.virtqueue_pairs = queue_pairs; sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); return 0; } #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) static int virtnet_set_tx_csum(struct net_device *dev, u32 data) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) return -ENOSYS; return ethtool_op_set_tx_hw_csum(dev, data); } #endif static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) struct dev_addr_list *addr; #endif struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, addr = addr->next) memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); #else i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); #endif sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) { int i; int cpu; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, -1); virtqueue_set_affinity(vi->sq[i].vq, -1); } vi->affinity_hint_set = false; } i = 0; for_each_online_cpu(cpu) { if (cpu == hcpu) { *per_cpu_ptr(vi->vq_index, cpu) = -1; } else { *per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs; } } } static void virtnet_set_affinity(struct virtnet_info *vi) { int i; int cpu; /* In multiqueue mode, when the number of cpu is equal to the number of * queue pairs, we let the queue pairs to be private to one cpu by * setting the affinity hint to eliminate the contention. 
*/ if (vi->curr_queue_pairs == 1 || vi->max_queue_pairs != num_online_cpus()) { virtnet_clean_affinity(vi, -1); return; } i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); *per_cpu_ptr(vi->vq_index, cpu) = i; i++; } vi->affinity_hint_set = true; } static int virtnet_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); switch(action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_DEAD: virtnet_set_affinity(vi); break; case CPU_DOWN_PREPARE: virtnet_clean_affinity(vi, (long)hcpu); break; default: break; } return NOTIFY_OK; } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)) /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. 
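 * (Editor's note: only the 'combined' count may change, and never beyond
 * the maximum number of queue pairs the device advertised at probe time.)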
*/ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs) return -EINVAL; get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); return err; } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } #endif static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, .set_ufo = ethtool_op_set_ufo, #else .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, #endif }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) .ndo_get_stats = virtnet_stats, #else .ndo_get_stats64 = virtnet_stats, #endif .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) /* from higher version kernel code. For redhat*/ static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) { struct in_ifaddr *ifa; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { arp_send(ARPOP_REQUEST, ETH_P_ARP, ifa->ifa_local, dev, ifa->ifa_local, NULL, dev->dev_addr, NULL); } } /** * netif_notify_peers - notify network peers about existence of @dev * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when * a device wants to inform the rest of the network about some sort of * reconfiguration such as a failover event or virtual machine * migration. * For redhat. 
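 * (Editor's note: this backport sends one gratuitous ARP for each IPv4
 * address configured on the device.)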
*/ void netif_notify_peers(struct net_device *dev) { struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(dev); if(in_dev) inetdev_send_gratuitous_arp(dev, in_dev); rtnl_unlock(); } #endif static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; mutex_lock(&vi->config_lock); if (!vi->config_enable) goto done; if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) netif_notify_peers(vi->dev); #else netdev_notify_peers(vi->dev); #endif virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) goto done; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } done: mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) netif_napi_del(&vi->rq[i].napi); kfree(vi->rq); kfree(vi->sq); } static void free_receive_bufs(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); } } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) dev_kfree_skb(buf); } for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; } BUG_ON(vi->rq[i].num != 0); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi, -1); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. 
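 * (Editor's note: total_vqs below is therefore 2 * max_queue_pairs, plus
 * one more if the device offers VIRTIO_NET_F_CTRL_VQ.)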
*/ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) vi->dev->features |= NETIF_F_HW_VLAN_FILTER; #else vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; #endif } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); return 0; err_find: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } return 0; err_rq: kfree(vi->sq); err_sq: return -ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; get_online_cpus(); virtnet_set_affinity(vi); put_online_cpus(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } static int virtnet_probe(struct virtio_device *vdev) { int i, err; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; /* Find if host supports multiqueue virtio_net device */ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, offsetof(struct virtio_net_config, max_virtqueue_pairs), &max_queue_pairs); /* We need at least 2 queue's */ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. 
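 * (Editor's note: the priv_flags set below only exist on newer kernels,
 * hence the version checks.)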
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) dev->priv_flags |= IFF_UNICAST_FLT; #endif dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->features |= NETIF_F_TSO_ECN; dev->features |= NETIF_F_GSO_ROBUST; #else dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; #endif /* (!csum && gso) case will be fixed by register_netdev() */ } dev->vlan_features = dev->features; /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len) < 0) eth_hw_addr_random(dev); /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; vi->vq_index = alloc_percpu(int); if (vi->vq_index == NULL) goto free_stats; mutex_init(&vi->config_lock); vi->config_enable = true; INIT_WORK(&vi->config_work, virtnet_config_changed_work); /* If we can receive ANY GSO packets, we must allocate large ones. 
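 * (Editor's note: big_packets switches the receive path to page-chained
 * buffers, add_recvbuf_big(), so a full GSO frame fits even without
 * mergeable receive buffers.)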
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) { vi->any_header_sg = true; if (vi->mergeable_rx_bufs) dev->needed_headroom = sizeof (struct virtio_net_hdr_mrg_rxbuf); else dev->needed_headroom = sizeof (struct virtio_net_hdr); } /* Use single tx/rx queue pair as default */ vi->curr_queue_pairs = 1; vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free_index; get_online_cpus(); err = virtnet_set_queues(vi, max_queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, max_queue_pairs); netif_set_real_num_rx_queues(dev, max_queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->rq[i].num == 0) { free_unused_bufs(vi); err = -ENOMEM; goto free_recv_bufs; } } vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_recv_bufs; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_recv_bufs: free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); virtnet_del_vqs(vi); free_index: free_percpu(vi->vq_index); free_stats: free_percpu(vi->stats); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); /* Free unused buffers in both send and recv, if any. */ free_unused_bufs(vi); free_receive_bufs(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device. 
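 * (Editor's note: config_enable is cleared under config_lock so a
 * concurrent virtnet_config_changed_work() bails out before touching the
 * device being removed.)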
*/ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); unregister_netdev(vi->dev); remove_vq_common(vi); flush_work(&vi->config_work); free_percpu(vi->vq_index); free_percpu(vi->stats); free_netdev(vi->dev); } #ifdef CONFIG_PM static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int i; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); netif_device_detach(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); } remove_vq_common(vi); flush_work(&vi->config_work); return 0; } static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err, i; err = init_vqs(vi); if (err) return err; if (netif_running(vi->dev)) { for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); } netif_device_attach(vi->dev); mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); err = register_hotcpu_notifier(&vi->nb); if (err) return err; return 0; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; module_virtio_driver(virtio_net_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/virtio_pci.c000066400000000000000000000564271314037446600311010ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Anthony Liguori "); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1"); /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; /* the IO mapping for the PCI config space */ void __iomem *ioaddr; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; /* MSI-X support */ int msix_enabled; int intx_enabled; struct msix_entry *msix_entries; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; /* Status saved during hibernate/restore */ u8 saved_status; /* Whether we have vector per vq */ bool per_vq_vectors; }; /* Constants for MSI-X */ /* Use first vector for configuration changes, second and the rest for * virtqueues Thus, we need at least 2 vectors for MSI. */ enum { VP_MSIX_CONFIG_VECTOR = 0, VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ int num; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; /* MSI-X vector (or none) */ unsigned msix_vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_pci_device, vdev); } /* virtio config->get_features() implementation */ static u32 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* When someone needs more than 32 feature bits, we'll need to * steal a bit to indicate that the rest are somewhere else. */ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); } /* virtio config->finalize_features() implementation */ static void vp_finalize_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* We only support 32 feature bits. */ BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); } /* virtio config->get() implementation */ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = ioread8(ioaddr + i); } /* the config->set() implementation. 
it's symmetric to the config->get() * implementation */ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; for (i = 0; i < len; i++) iowrite8(ptr[i], ioaddr + i); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } /* wait for pending irq handlers */ static void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) synchronize_irq(vp_dev->pci_dev->irq); for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(vp_dev->msix_entries[i].vector); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush out the status write, and flush in device writes, * including MSi-X interrupts, if any. */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); } /* the notify function used when creating a virt queue */ static void vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. */ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_driver *drv; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vp_dev->vdev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
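 * (Editor's note: with a legacy INT#x interrupt a single ISR read reports
 * both config and ring events, so both paths are handled from this one
 * handler.)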
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static void vp_free_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); for (i = 0; i < vp_dev->msix_vectors; i++) if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; vp_dev->msix_vectors = 0; } vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_entries); vp_dev->msix_entries = NULL; kfree(vp_dev->msix_affinity_masks); vp_dev->msix_affinity_masks = NULL; } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) goto error; vp_dev->msix_affinity_masks = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, GFP_KERNEL); if (!vp_dev->msix_affinity_masks) goto error; for (i = 0; i < nvectors; ++i) if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], GFP_KERNEL)) goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; /* pci_enable_msix returns positive if we can't get this many. 
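 * (Editor's note: the positive return value is the number of vectors
 * actually available; no retry with fewer vectors is attempted here, it is
 * simply reported as -ENOSPC.)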
*/ err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); if (err > 0) err = -ENOSPC; if (err) goto error; vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Verify we had enough resources to assign the vector */ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: vp_free_vectors(vdev); return err; } static int vp_request_intx(struct virtio_device *vdev) { int err; struct virtio_pci_device *vp_dev = to_vp_device(vdev); err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (!err) vp_dev->intx_enabled = 1; return err; } static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; u16 num; int err; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); /* Check if queue is either not available or already active. 
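 * (Editor's note: a queue size of zero means the device does not implement
 * this index, and a non-zero PFN means the queue has already been
 * activated by someone else.)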
*/ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); /* allocate and fill out our structure the represents an active * queue */ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); info->num = num; info->msix_vector = msix_vec; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); if (info->queue == NULL) { err = -ENOMEM; goto out_info; } /* activate the queue */ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, true, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; } vq->priv = info; info->vq = vq; if (msix_vec != VIRTIO_MSI_NO_VECTOR) { iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto out_assign; } } if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); } else { INIT_LIST_HEAD(&info->node); } return vq; out_assign: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); out_info: kfree(info); return ERR_PTR(err); } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long flags, size; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); } vring_del_virtqueue(vq); /* Select and deactivate the queue */ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vq->priv; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, vq); vp_del_vq(vq); } vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; int i, err, nvectors, allocated_vectors; if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) goto error_request; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) if (callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. 
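 * With this layout, vector 0 ends up handling configuration changes
 * and vector 1 is shared by the callbacks of every virtqueue.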
*/ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; } vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } else if (!callbacks[i] || !vp_dev->msix_enabled) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vp_dev->per_vq_vectors) msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof *vp_dev->msix_names, "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(vp_dev->msix_entries[msix_vec].vector, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); if (err) { vp_del_vq(vqs[i]); goto error_find; } } return 0; error_find: vp_del_vqs(vdev); error_request: return err; } /* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { int err; /* Try MSI-X with one vector per queue. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false); if (!err) return 0; /* Finally fall back to regular interrupts. */ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false); } static const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return pci_name(vp_dev->pci_dev); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) /* Setup the affinity for a virtqueue: * - force the affinity for per vq vector * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = vq->priv; struct cpumask *mask; unsigned int irq; if (!vq->callback) return -EINVAL; if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = vp_dev->msix_entries[info->msix_vector].vector; if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } } return 0; } #endif static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, .find_vqs = vp_find_vqs, .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) .set_vq_affinity = vp_set_vq_affinity, #endif }; static void virtio_pci_release_dev(struct device *_d) { /* * No need for a release method as we allocate/free * all devices together with the pci devices. * Provide an empty one to avoid getting a warning from core. */ } #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) /** * pci_msi_off - disables any msi or msix capabilities * @dev: the PCI device to operate on * * If you want to use msi see pci_enable_msi and friends. * This is a lower level primitive that allows us to disable * msi operation at the device level. 
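 * This local copy is only compiled for kernels older than 3.0 (see
 * the LINUX_VERSION_CODE guard above); newer kernels are expected to
 * provide the helper themselves.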
*/ void pci_msi_off(struct pci_dev *dev) { int pos; u16 control; pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); } pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); } } #endif /* the PCI probing function */ static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) return -ENODEV; if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", VIRTIO_PCI_ABI_VERSION, pci_dev->revision); return -ENODEV; } /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (vp_dev == NULL) return -ENOMEM; vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); /* Disable MSI/MSIX to bring device to a known good state. */ pci_msi_off(pci_dev); /* enable the device */ err = pci_enable_device(pci_dev); if (err) goto out; err = pci_request_regions(pci_dev, "virtio-pci"); if (err) goto out_enable_device; vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); if (vp_dev->ioaddr == NULL) { err = -ENOMEM; goto out_req_regions; } pci_set_drvdata(pci_dev, vp_dev); pci_set_master(pci_dev); /* we use the subsystem vendor/device id as the virtio vendor/device * id. 
this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by * the subsystem ids */ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) goto out_set_drvdata; return 0; out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); out_enable_device: pci_disable_device(pci_dev); out: kfree(vp_dev); return err; } static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); vp_del_vqs(&vp_dev->vdev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } #ifdef CONFIG_PM static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = 0; vp_dev->saved_status = vp_get_status(&vp_dev->vdev); if (drv && drv->freeze) ret = drv->freeze(&vp_dev->vdev); if (!ret) pci_disable_device(pci_dev); return ret; } static int virtio_pci_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = pci_enable_device(pci_dev); if (ret) return ret; pci_set_master(pci_dev); vp_finalize_features(&vp_dev->vdev); if (drv && drv->restore) ret = drv->restore(&vp_dev->vdev); /* Finally, tell the device we're all set */ if (!ret) vp_set_status(&vp_dev->vdev, vp_dev->saved_status); return ret; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) static const struct dev_pm_ops virtio_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) }; #else static const struct dev_pm_ops virtio_pci_pm_ops = { .suspend = virtio_pci_freeze, .resume = virtio_pci_restore, .freeze = virtio_pci_freeze, .thaw = virtio_pci_restore, .restore = virtio_pci_restore, .poweroff = virtio_pci_freeze, }; #endif #endif static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = virtio_pci_remove, #ifdef CONFIG_PM .driver.pm = &virtio_pci_pm_ops, #endif }; static int __init virtio_pci_init(void) { return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); } module_exit(virtio_pci_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/3.0.76/virtio_ring.c000066400000000000000000000570751314037446600312650ustar00rootroot00000000000000/* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #endif struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ struct vring vring; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. */ u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif /* Tokens for callbacks. */ void *data[]; }; #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, unsigned int *count) { return sg_next(sg); } static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, unsigned int *count) { if (--(*count) == 0) return NULL; return sg + 1; } /* Set up an indirect table of descriptors and add it to the queue. */ static inline int vring_add_indirect(struct vring_virtqueue *vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_sg, unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, gfp_t gfp) { struct vring_desc *desc; unsigned head; struct scatterlist *sg; int i, n; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; /* Transfer entries from the sg lists into the indirect page */ i = 0; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { desc[i].flags = VRING_DESC_F_NEXT; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } BUG_ON(i != total_sg); /* Last one doesn't continue. 
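 * Clearing VRING_DESC_F_NEXT terminates the chain inside the indirect
 * table; the ring itself only consumes the single
 * VRING_DESC_F_INDIRECT descriptor that is set up just below.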
*/ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ vq->free_head = vq->vring.desc[head].next; return head; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; unsigned int i, n, avail, uninitialized_var(prev), total_sg; int head; START_USE(vq); BUG_ON(data == NULL); #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif total_sg = total_in + total_out; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 2 && vq->vq.num_free) { head = vring_add_indirect(vq, sgs, next, total_sg, total_out, total_in, out_sgs, in_sgs, gfp); if (likely(head >= 0)) goto add_head; } BUG_ON(total_sg > vq->vring.num); BUG_ON(total_sg == 0); if (vq->vq.num_free < total_sg) { pr_debug("Can't add buf len %i - avail = %i\n", total_sg, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ vq->vq.num_free -= total_sg; head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ vq->free_head = i; add_head: /* Set token. */ vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->vring.avail->idx++; vq->num_added++; /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; } /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. * @sg: the description of the buffer(s). * @out_num: the number of sg readable by other side * @in_num: the number of sg which are writable (after readable ones) * @data: the token identifying the buffer. 
* @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *data, gfp_t gfp) { struct scatterlist *sgs[2]; sgs[0] = sg; sgs[1] = sg + out; return virtqueue_add(_vq, sgs, sg_next_arr, out, in, out ? 1 : 0, in ? 1 : 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_buf); /** * virtqueue_add_sgs - expose buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_num: the number of scatterlists readable by other side * @in_num: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_out, total_in; /* Count them first. */ for (i = total_out = total_in = 0; i < out_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_out++; } for (; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_in++; } return virtqueue_add(_vq, sgs, sg_next_chained, total_out, total_in, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. 
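 * As an illustrative sketch (not part of the original sources), a
 * driver that serializes ring access with its own lock might split
 * the kick like this, so that the potentially slow notification
 * happens outside the critical section:
 *
 *	spin_lock_irqsave(&mylock, flags);
 *	err = virtqueue_add_buf(vq, sg, out, in, token, GFP_ATOMIC);
 *	notify = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&mylock, flags);
 *	if (notify)
 *		virtqueue_notify(vq);
 *
 * Here mylock, token, out and in are caller-side names used purely
 * for illustration.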
*/ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(vring_avail_event(&vq->vring), new, old); } else { needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); } END_USE(vq); return needs_kick; } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @vq: the struct virtqueue * * This does not need to be serialized. */ void virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Prod other side to tell it about changes. */ vq->notify(_vq); } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_buf calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ void virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i; /* Clear data ptr. */ vq->data[head] = NULL; /* Put back on free list: find end */ i = head; /* Free the indirect table */ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) kfree(phys_to_virt(vq->vring.desc[i].addr)); while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) { return vq->last_used_idx != vq->vring.used->idx; } /** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the driver wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_buf(). */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->data[i])) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. 
*/ ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq->weak_barriers); } #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; END_USE(vq); return last_used_idx; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); virtio_mb(vq->weak_barriers); return (u16)last_used_idx != vq->vring.used->idx; } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. 
* * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; virtio_mb(vq->weak_barriers); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_buf(). * This is not valid on an active queue; it is useful only for device * shutdown. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->data[i]) continue; /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); vq->vring.avail->idx--; END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) return IRQ_HANDLED; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct vring_virtqueue *vq; unsigned int i; /* We assume num is a power of 2. */ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); if (!vq) return NULL; vring_init(&vq->vring, num, pages, vring_align); vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.num_free = num; vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. 
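 * VRING_AVAIL_F_NO_INTERRUPT is only a hint (the same flag that
 * virtqueue_disable_cb() above describes as an unreliable
 * optimization), but a queue created without a callback never needs
 * the interrupt anyway.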
*/ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; vq->data[i] = NULL; } vq->data[i] = NULL; return &vq->vq; } EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); /* Manipulates transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->broken; } EXPORT_SYMBOL_GPL(virtqueue_is_broken); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/Kbuild000066400000000000000000000001451314037446600271520ustar00rootroot00000000000000EXTRA_CFLAGS := -I$(M)/include/ obj-m := virtio.o virtio_ring.o virtio_pci.o obj-m += virtio_net.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/vni_front/Makefile000066400000000000000000000000661314037446600274570ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/000077500000000000000000000000001314037446600262275ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.27/000077500000000000000000000000001314037446600267635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.27/balloon.c000066400000000000000000000541241314037446600305630ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. 
Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; /* VM /proc information for memory */ extern unsigned long totalram_pages; #if !defined(MODULE) && defined(CONFIG_HIGHMEM) extern unsigned long totalhigh_pages; #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. 
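 * Hence GFP_BALLOON below combines GFP_ATOMIC with __GFP_HIGHMEM,
 * __GFP_NOWARN and __GFP_NORETRY: a failed allocation stays quiet and
 * simply ends the current adjustment pass instead of putting pressure
 * on the VM.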
*/ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_cold_page(page); #else /* free_cold_page() is not being exported. */ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = min(bs.target_pages, bs.hard_limit); if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { /*modified by linyuliang 00176349 begin */ /*when xen has less pages than nr_pages,don not give it back! 
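 * (That is, increase_reservation() is never asked for more pages than
 * are currently parked on the ballooned_pages list, so a NULL page
 * here indicates a bug rather than a recoverable shortage.)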
*/ BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); /*modified by linyuliang 00176349 end */ } /*modified by linyuliang 00176349 begin */ bs.current_pages += rc; /*modified by linyuliang 00176349 end */ /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; out: balloon_unlock(flags); /*modified by linyuliang 00176349 begin */ //return 0; return rc < 0 ? rc : rc != nr_pages; /*modified by linyuliang 00176349 end */ } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! 
totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } extern unsigned int migrate_state; /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; if(!migrate_state) { schedule_work(&balloon_worker); } else { IPRINTK("totalram_pages=%lu k, current_pages=%lu k migrate_state:%d\n", totalram_pages*4, bs.current_pages*4, migrate_state); } } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
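 * For example, assuming 4 KiB pages (PAGE_SHIFT == 12), a
 * memory/target value of 1048576 KiB becomes
 * 1048576 >> (12 - 10) == 262144 pages.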
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n" "Xen hard limit: ", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); if (bs.hard_limit != ~0UL) len += sprintf(page + len, "%8lu kB\n", PAGES2KB(bs.hard_limit)); else len += sprintf(page + len, " ??? kB\n"); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN)*/ #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; bs.hard_limit = ~0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
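 * Page frames between the initial allocation (xen_start_info->nr_pages)
 * and max_pfn are not backed by machine memory at boot, so they go
 * straight onto the ballooned page list.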
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
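 * Without CONFIG_XEN there is no way to tear down the PTE mapping
 * here, so this path just reports failure and lets the error handling
 * below free the page again.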
*/ ret = 1; #endif } if (ret != 0) { balloon_unlock(flags); balloon_free_page(page); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i]); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i]); } balloon_unlock(flags); if (free_vec) kfree(pagevec); else totalram_pages = bs.current_pages -= nr_pages; schedule_work(&balloon_worker); } void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 1); } void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 0); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page); bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.27/common.h000066400000000000000000000046021314037446600304260ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* We may hit the hard limit in Xen. If we do then we remember it. 
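 * hard_limit is initialised to ~0UL, which the driver interprets as
 * "no limit reported by Xen yet".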
*/ unsigned long hard_limit; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern unsigned long num_physpages; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.27/sysfs.c000066400000000000000000000120241314037446600302750ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) \ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(hard_limit_kb, (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n", (bs.hard_limit!=~0UL) ? 
PAGES2KB(bs.hard_limit) : 0); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ strcpy(memstring, buf); target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_hard_limit_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.32/000077500000000000000000000000001314037446600267575ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.32/balloon.c000066400000000000000000000562561314037446600305670ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. 
Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include /*added by linyuliang 00176349 end */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. 
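GFP_BALLOON below therefore combines GFP_ATOMIC with __GFP_NOWARN and __GFP_NORETRY, so a failed allocation simply shortens the current batch in decrease_reservation() rather than pressing the allocator any harder.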
*/ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. 
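When built as a module, fall back to releasing pages one at a time with __free_page().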
*/ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! 
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } extern unsigned int migrate_state; /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; if(!migrate_state) { schedule_work(&balloon_worker); } else { IPRINTK("totalram_pages=%lu k, current_pages=%lu k migrate_state:%d\n", totalram_pages*4, bs.current_pages*4, migrate_state); } } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
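* For example, with 4 KiB pages (PAGE_SHIFT == 12) a memory/target value of 1048576 KiB, i.e. 1 GiB, becomes 1048576 >> 2 == 262144 pages.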
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN)*/ #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
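Pages in the range [xen_start_info->nr_pages, max_pfn) were never populated by Xen, so mark them reserved, invalidate their P2M entries and append them to the balloon without touching the page accounting (account == 0).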
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], !free_vec); } if (!free_vec) totalram_pages = bs.current_pages -= nr_pages; balloon_unlock(flags); if (free_vec) kfree(pagevec); schedule_work(&balloon_worker); } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 1); } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 0); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.32/common.h000066400000000000000000000043741314037446600304300ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/2.6.32/sysfs.c000066400000000000000000000127521314037446600303010ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
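/*
 * Illustrative sketch, not part of the driver: the min_kb attribute exported
 * by the sysfs code above reports PAGES2KB(balloon_minimum_target()), and
 * balloon_minimum_target() in balloon.c implements the piecewise linear
 * policy documented in its comment table. The standalone program below
 * restates that policy in userspace, assuming 4 KiB pages (PAGE_SHIFT == 12);
 * the names used here (min_target_pages, sizes_mib) are illustrative only.
 * Note the driver additionally clamps the result to the current target so
 * the minimum never forces the guest to grow.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define MB2PAGES(mb) ((unsigned long)(mb) << (20 - PAGE_SHIFT))

/* Same piecewise policy as balloon_minimum_target(), expressed in pages. */
static unsigned long min_target_pages(unsigned long max_pfn)
{
	if (max_pfn < MB2PAGES(128))
		return MB2PAGES(8) + (max_pfn >> 1);
	if (max_pfn < MB2PAGES(512))
		return MB2PAGES(40) + (max_pfn >> 2);
	if (max_pfn < MB2PAGES(2048))
		return MB2PAGES(104) + (max_pfn >> 3);
	return MB2PAGES(296) + (max_pfn >> 5);
}

int main(void)
{
	/* Reproduces the table in the driver comment, e.g. 512 MiB -> 168 MiB. */
	static const unsigned long sizes_mib[] =
		{ 16, 32, 128, 512, 2048, 8192, 32768, 131072 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes_mib) / sizeof(sizes_mib[0]); i++) {
		unsigned long max_pfn = MB2PAGES(sizes_mib[i]);

		printf("%8lu MiB guest -> minimum target %5lu MiB\n",
		       sizes_mib[i],
		       min_target_pages(max_pfn) >> (20 - PAGE_SHIFT));
	}
	return 0;
}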
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.101/000077500000000000000000000000001314037446600270275ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.101/balloon.c000066400000000000000000000527511314037446600306330ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. 
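Each ballooned page is linked through its struct page lru field (see PAGE_TO_LIST below), so the list needs no extra storage.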
*/ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) pr_info("xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) pr_warning("xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. 
*/ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static DEFINE_TIMER(balloon_timer, balloon_alarm, 0, 0); static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; totalram_pages = bs.current_pages - totalram_bias; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(NULL); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); xen_scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); xen_scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); mutex_unlock(&balloon_mutex); } extern unsigned int migrate_state; /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 
0:1; if(!migrate_state) { schedule_work(&balloon_worker); } else { IPRINTK("totalram_pages=%lu k, current_pages=%lu k migrate_state:%d\n", totalram_pages*4, bs.current_pages*4, migrate_state); } } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) pr_err("Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; int rc; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else rc = HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target); /* * Xen prior to 3.4.0 masks the memory_op command to 4 bits, thus * converting XENMEM_get_pod_target to XENMEM_decrease_reservation. * Fortunately this results in a request with all input fields zero, * but (due to the way bit 4 and upwards get interpreted) a starting * extent of 1. When start_extent > nr_extents (>= in newer Xen), we * simply get start_extent returned. */ totalram_bias = HYPERVISOR_memory_op(rc != -ENOSYS && rc != 1 ? 
XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_NOTRACK|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); xen_scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ #ifdef CONFIG_XEN static void _free_empty_pages(struct page **pagevec, int nr_pages, bool account) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], account); } if (account) { bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; } balloon_unlock(flags); schedule_work(&balloon_worker); } void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages(pagevec, nr_pages, true); } #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { if (pagevec) { _free_empty_pages(pagevec, nr_pages, false); kfree(pagevec); } } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); #endif void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages - totalram_bias; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.101/common.h000066400000000000000000000043741314037446600305000ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.101/sysfs.c000066400000000000000000000127601314037446600303500ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static const struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
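/*
 * Editor's note - illustrative sketch, not part of the original UVP sources.
 * The sysdev attributes defined above expose the balloon target in KiB under
 * the "xen_memory" class; with sysdev id 0 the attribute is expected at
 * /sys/devices/system/xen_memory/xen_memory0/target_kb (path assumed, not
 * taken from this tree). A minimal user-space helper that asks the guest to
 * shrink to a given size could look like the following; it is guarded by
 * "#if 0" so it is never built as part of the driver.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>

/* Request a new balloon target, in KiB, via the sysfs attribute.
 * Must run as root: store_target_kb() checks CAP_SYS_ADMIN. */
static int set_balloon_target_kb(unsigned long kb)
{
	FILE *f = fopen("/sys/devices/system/xen_memory/xen_memory0/target_kb", "w");

	if (!f)
		return -1;
	/* store_target_kb() parses this value, shifts it left by 10
	 * (KiB -> bytes) and right by PAGE_SHIFT (bytes -> pages), then
	 * calls balloon_set_new_target(). */
	fprintf(f, "%lu\n", kb);
	return fclose(f);
}

int main(void)
{
	/* Ask for a 512 MiB target. */
	return set_balloon_target_kb(512UL * 1024) ? EXIT_FAILURE : EXIT_SUCCESS;
}
#endif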
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.13/000077500000000000000000000000001314037446600267515ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.13/balloon.c000066400000000000000000000564011314037446600305510ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include /*added by linyuliang 00176349 begin */ #include /*added by linyuliang 00176349 end */ #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. 
Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. 
*/ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; /*added by linyuliang 00176349 begin */ IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); /*added by linyuliang 00176349 end */ break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; /* totalram_pages = bs.current_pages; */ if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ /*modified by linyuliang 00176349 begin */ /*When adjustment be stopped one time,adjustment over! 
then write current memory to xenstore*/ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); // if (!need_sleep) // { if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); // } /* Schedule more work if there is some still to be done. */ // if (current_target() != bs.current_pages) // mod_timer(&balloon_timer, jiffies + HZ); mutex_unlock(&balloon_mutex); } extern unsigned int migrate_state; /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; if(!migrate_state) { schedule_work(&balloon_worker); } else { IPRINTK("totalram_pages=%lu k, current_pages=%lu k migrate_state:%d\n", totalram_pages*4, bs.current_pages*4, migrate_state); } } /*modified by linyuliang 00176349 end */ static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { /*#if defined(CONFIG_X86) && defined(CONFIG_XEN)*/ #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("[uvp] Initialising new balloon driver.\n"); #ifdef CONFIG_XEN pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else /* bs.current_pages = totalram_pages; */ totalram_bias = HYPERVISOR_memory_op( HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target) != -ENOSYS ? XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } /* totalram_pages = --bs.current_pages; */ totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages, int free_vec) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], !free_vec); } if (!free_vec) totalram_pages = bs.current_pages -= nr_pages; balloon_unlock(flags); if (free_vec) kfree(pagevec); schedule_work(&balloon_worker); } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 1); } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages_and_pagevec(pagevec, nr_pages, 0); } void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.13/common.h000066400000000000000000000043741314037446600304220ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.13/sysfs.c000066400000000000000000000127521314037446600302730ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
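/*
 * Editor's note - illustrative usage, not part of the original sources.
 * Besides the sysfs attributes above, balloon_init() in balloon.c registers
 * a procfs front-end at /proc/xen/balloon. Reads report the current, target,
 * minimum and maximum allocation in kB; writes are parsed with memparse(),
 * so size suffixes are honoured (run as root, since balloon_write() checks
 * CAP_SYS_ADMIN):
 *
 *   # echo 1G > /proc/xen/balloon      target_bytes = 1 << 30
 *   # cat /proc/xen/balloon            "Current allocation: ... kB", etc.
 *
 * The driver shifts target_bytes right by PAGE_SHIFT to obtain a page count
 * and hands it to balloon_set_new_target(), which never lets the target drop
 * below balloon_minimum_target().
 */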
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.76/000077500000000000000000000000001314037446600267625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.76/balloon.c000066400000000000000000000550071314037446600305630ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifdef CONFIG_PROC_FS static struct proc_dir_entry *balloon_pde; #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); #ifndef MODULE #include static struct pagevec free_pagevec; #endif struct balloon_stats balloon_stats; unsigned long old_totalram_pages; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() ((void)0) #define dec_totalhigh_pages() ((void)0) #endif #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif /* List of ballooned pages, threaded through the mem_map array. 
*/ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) pr_info("xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) pr_warning("xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; if (account) dec_totalhigh_pages(); } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) { bs.balloon_high--; inc_totalhigh_pages(); } else bs.balloon_low--; zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page) && !pagevec_add(&free_pagevec, page)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #else /* pagevec interface is not being exported. 
*/ __free_page(page); #endif } static inline void balloon_free_and_unlock(unsigned long flags) { #ifndef MODULE if (pagevec_count(&free_pagevec)) { __pagevec_free(&free_pagevec); pagevec_reinit(&free_pagevec); } #endif balloon_unlock(flags); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static DEFINE_TIMER(balloon_timer, balloon_alarm, 0, 0); static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn num_physpages #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages + rc; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; out: balloon_free_and_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(NULL); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; IPRINTK("nr_pages = %lu\n",nr_pages); IPRINTK("need_sleep= %d\n",need_sleep); break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); xen_scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); xen_scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; if(old_totalram_pages < totalram_pages) { IPRINTK("old_totalram=%lu, totalram_pages=%lu\n", old_totalram_pages*4, totalram_pages*4); totalram_pages = totalram_pages - nr_pages; bs.current_pages = totalram_pages + totalram_bias; IPRINTK("when ballooning, the mem online! totalram=%lu, current=%lu\n", totalram_pages*4, bs.current_pages*4); } else { totalram_pages = bs.current_pages - totalram_bias; } old_totalram_pages = totalram_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(totalram_pages > (bs.current_pages - totalram_bias)) { IPRINTK("totalram_pages=%lu, current_pages=%lu, totalram_bias=%lu\n", totalram_pages*4, bs.current_pages*4, totalram_bias*4); bs.current_pages = totalram_pages + totalram_bias; IPRINTK("totalram_pages=%lu, current_pages=%lu\n", totalram_pages*4, bs.current_pages*4); } if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != bs.current_pages || (flag==1)) { sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); mutex_unlock(&balloon_mutex); } extern unsigned int migrate_state; /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. 
Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; if(!migrate_state) { schedule_work(&balloon_worker); } else { IPRINTK("totalram_pages=%lu k, current_pages=%lu k migrate_state:%d\n", totalram_pages*4, bs.current_pages*4, migrate_state); } } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) pr_err("Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static int balloon_write(struct file *file, const char __user *buffer, unsigned long count, void *data) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf( page, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(num_physpages), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); *eof = 1; return len; } #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; int rc; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN pagevec_init(&free_pagevec, true); bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else rc = HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target); /* * Xen prior to 3.4.0 masks the memory_op command to 4 bits, thus * converting XENMEM_get_pod_target to XENMEM_decrease_reservation. * Fortunately this results in a request with all input fields zero, * but (due to the way bit 4 and upwards get interpreted) a starting * extent of 1. When start_extent > nr_extents (>= in newer Xen), we * simply get start_extent returned. */ totalram_bias = HYPERVISOR_memory_op(rc != -ENOSYS && rc != 1 ? 
XENMEM_maximum_reservation : XENMEM_current_reservation, &pod_target.domid); if ((long)totalram_bias != -ENOSYS) { BUG_ON(totalram_bias < totalram_pages); bs.current_pages = totalram_bias; totalram_bias -= totalram_pages; old_totalram_pages = totalram_pages; } else { totalram_bias = 0; bs.current_pages = totalram_pages; } #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #ifdef CONFIG_PROC_FS if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } balloon_pde->read_proc = balloon_read; balloon_pde->write_proc = balloon_write; #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. */ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); xen_scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_free_and_unlock(flags); goto err; } totalram_pages = --bs.current_pages - totalram_bias; if (PageHighMem(page)) dec_totalhigh_pages(); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ #ifdef CONFIG_XEN static void _free_empty_pages(struct page **pagevec, int nr_pages, bool account) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], account); } if (account) { bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; } balloon_unlock(flags); schedule_work(&balloon_worker); } void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages(pagevec, nr_pages, true); } #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { if (pagevec) { _free_empty_pages(pagevec, nr_pages, false); kfree(pagevec); } } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); #endif void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); totalram_pages = --bs.current_pages - totalram_bias; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.76/common.h000066400000000000000000000043741314037446600304330ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/3.0.76/sysfs.c000066400000000000000000000127601314037446600303030ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(num_physpages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_min_kb.attr, &attr_max_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static const struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static struct sys_device balloon_sysdev; static int __init register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } static __exit void unregister_balloon(struct sys_device *sysdev) { int i; sysfs_remove_group(&sysdev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); } int __init balloon_sysfs_init(void) { return register_balloon(&balloon_sysdev); } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_sysdev); } 
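The sysfs attributes defined above are the usual way to drive the balloon from user space: writes to target_kb are interpreted as KiB by store_target_kb(), while the plain target attribute accepts memparse() suffixes such as k, M or G. The snippet below is a minimal, illustrative user-space sketch only (not part of the driver sources); the sysfs path is an assumption derived from the "xen_memory" sysdev class name registered above and may differ on a given kernel.

/*
 * Illustrative only: set the balloon target to 512 MiB by writing a KiB
 * value to the target_kb attribute created by sysfs.c above. The path is
 * assumed from BALLOON_CLASS_NAME ("xen_memory") and sysdev id 0.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/xen_memory/xen_memory0/target_kb";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("target_kb");
		return 1;
	}
	/* store_target_kb() treats the written value as KiB. */
	fprintf(f, "%lu\n", 512UL * 1024);
	return fclose(f) ? 1 : 0;
}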
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/Kbuild000066400000000000000000000002551314037446600273660ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/Makefile000066400000000000000000000000661314037446600276710ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-balloon/scrub.c000066400000000000000000000010461314037446600275120ustar00rootroot00000000000000#include #include #include void scrub_pages(void *v, unsigned int count) { if (likely(cpu_has_xmm2)) { unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4); for (; n--; v += sizeof(long) * 4) asm("movnti %1,(%0)\n\t" "movnti %1,%c2(%0)\n\t" "movnti %1,2*%c2(%0)\n\t" "movnti %1,3*%c2(%0)\n\t" : : "r" (v), "r" (0L), "i" (sizeof(long)) : "memory"); asm volatile("sfence" : : : "memory"); } else for (; count--; v += PAGE_SIZE) clear_page(v); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/000077500000000000000000000000001314037446600271765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/000077500000000000000000000000001314037446600277325ustar00rootroot00000000000000platform-pci.c000066400000000000000000000240221314037446600324140ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
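 * The __vmalloc() call below therefore clears _PAGE_NX explicitly so the stub pages stay executable, rather than using the original: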
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret 
= xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/reboot.c000066400000000000000000000200401314037446600313640ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
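Set by xen_resume_notifier() and checked after __xen_suspend() returns, so the suspend event channel is only re-registered after a real (non-cancelled) resume.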
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed(current, cpumask_of_cpu(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
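XENBUS_IS_ERR_READ() covers both cases; the transaction is ended and the handler simply returns.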
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } /*պǨʱsuspendʧᵼǨƹȥsuspendʧܺǨƳʱ*/ //xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xenbus_backend_client.c000066400000000000000000000106761314037446600343420ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. 
In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; 
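	/* Tear down a mapping previously established with xenbus_map_ring();
	 * the grant handle is the one returned by that call. */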
gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); xenbus_client.c000066400000000000000000000421231314037446600326630ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. 
On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_KERNEL, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif /** * xenbus_switch_state * @dev: xenbus device * @xbt: transaction handle * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. 
*/ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: if (printf_buffer) kfree(printf_buffer); if (path_buffer) kfree(path_buffer); } /** * xenbus_dev_error * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. 
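 * A typical frontend allocates one channel per shared ring and then advertises the returned port in its XenStore directory (for example under an "event-channel" key) so the backend can bind to it.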
*/ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * Bind to an existing interdomain event channel in another domain. Returns 0 * on success and stores the local port in *port. On error, returns -errno, * switches the device to XenbusStateClosing, and saves the error in XenStore. */ int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = dev->otherend_id; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); if (err) xenbus_dev_fatal(dev, err, "binding to event channel %d from domain %d", remote_port, dev->otherend_id); else *port = bind_interdomain.local_port; return err; } EXPORT_SYMBOL_GPL(xenbus_bind_evtchn); #endif /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * xenbus_map_ring_valloc * @dev: xenbus device * @gnt_ref: grant reference * @vaddr: pointer to address to be filled out by mapping * * Based on Rusty Russell's skeleton driver's map_page. * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; struct vm_struct *area; *vaddr = NULL; area = xen_alloc_vm_area(PAGE_SIZE); if (!area) return -ENOMEM; op.host_addr = (unsigned long)area->addr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xen_free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); return op.status; } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; *vaddr = area->addr; return 0; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /** * xenbus_map_ring * @dev: xenbus device * @gnt_ref: grant reference * @handle: pointer to grant handle to be filled * @vaddr: address to be mapped to * * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. 
If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op = { .host_addr = (unsigned long)vaddr, .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /** * xenbus_unmap_ring_vfree * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) { struct vm_struct *area; struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) * method so that we don't have to muck with vmalloc internals here. * We could force the user to hang on to their struct vm_struct from * xenbus_map_ring_valloc, but these 6 lines considerably simplify * this API. */ read_lock(&vmlist_lock); for (area = vmlist; area != NULL; area = area->next) { if (area->addr == vaddr) break; } read_unlock(&vmlist_lock); if (!area) { xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } op.handle = (grant_handle_t)area->phys_addr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) xen_free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); /** * xenbus_unmap_ring * @dev: xenbus device * @handle: grant handle * @vaddr: addr to unmap * * Unmap a page of memory in this domain that was imported from another domain. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, .handle = handle, }; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); #endif /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. 
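 * Callers typically pass dev->otherend here to inspect the peer's state outside of a watch callback.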
*/ enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); xenbus_comms.c000066400000000000000000000151621314037446600325260ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(struct work_struct *); extern int xenstored_ready; static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { if (unlikely(xenstored_ready == 0)) { xenstored_ready = 1; schedule_work(&probe_work); } wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. 
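 * Data is copied into the shared ring a chunk at a time, with memory barriers ordering the copies against the producer/consumer index updates, and the remote end is kicked via notify_remote_via_evtchn(xen_store_evtchn) after each chunk.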
*/ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } xenbus_comms.h000066400000000000000000000035521314037446600325330ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/* * Private include for xenbus communications. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; #endif /* _XENBUS_COMMS_H */ xenbus_dev.c000066400000000000000000000233311314037446600321630ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. */ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || 
(u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_TRANSACTION_START: case XS_TRANSACTION_END: case XS_DIRECTORY: case XS_READ: case XS_GET_PERMS: case XS_RELEASE: case XS_GET_DOMAIN_PATH: case XS_WRITE: case XS_LINUX_WEAK_WRITE: case XS_MKDIR: case XS_RM: case XS_SET_PERMS: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: rc = -EINVAL; break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations 
xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } xenbus_probe.c000066400000000000000000000730431314037446600325210ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #ifdef MODULE #include #endif #else #include #include #include #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); /* If something in array of ids matches this device, return it. 
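 * The array is the driver's xenbus_device_id table, terminated by an entry
 * with an empty devicetype string (for instance { { "vif" }, { "" } } for a
 * network frontend; the "vif" name here is only an illustration).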
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif }, #if defined(CONFIG_XEN) || defined(MODULE) .dev = { .bus_id = "xen", }, #endif }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); if (is_initial_xendomain()) return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) 
drv->driver.owner = owner; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) drv->driver.mod_name = mod_name; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
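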
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); if (err) goto fail; /* Register with generic device framework. 
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
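 * fe_watch is registered on the "device" subtree; any node created or removed
 * below it is routed through frontend_changed() into xenbus_dev_changed(),
 * which either probes the new device or cleans up the vanished one.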
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) err = drv->suspend(xdev); if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev->bus_id, err); return 0; } static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev->bus_id, err); return 0; } static int resume_dev(struct device *dev, void *data) { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev->bus_id, err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev->bus_id, err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev->bus_id, err); return err; } return 0; } void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); /* A flag to determine if xenstored is 'ready' (i.e. has started) */ int xenstored_ready = 0; int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (xenstored_ready > 0) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(struct work_struct *unused) { BUG_ON((xenstored_ready <= 0)); /* Enumerate devices in xenstore and watch for changes. 
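 * Frontend devices are probed first, then the "device" watch is armed, and
 * the backend half (when built) does the same via
 * xenbus_backend_probe_and_watch().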
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #ifndef MODULE static int __init xenbus_probe_init(void) #else static int __devinit xenbus_probe_init(void) #endif { int err = 0; #if defined(CONFIG_XEN) || defined(MODULE) unsigned long page = 0; #endif DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { #if defined(CONFIG_XEN) || defined(MODULE) struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = 0; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif #else /* dom0 not yet supported */ #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { xenstored_ready = 1; #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif } #if defined(CONFIG_XEN) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. 
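 * xs_init() brings up the shared-ring communication (xb_init_comms()) and
 * starts the xenwatch and xenbus kernel threads that deliver watch events
 * and replies.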
*/ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } #endif xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); return 0; err: #if defined(CONFIG_XEN) || defined(MODULE) if (page) free_page(page); #endif /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifndef MODULE postcore_initcall(xenbus_probe_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #else int __devinit xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_disconnected_device(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_disconnected_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_disconnected_device); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. 
* This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_disconnected_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); xenbus_probe.h000066400000000000000000000062741314037446600325300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; #if defined(CONFIG_XEN) || defined(MODULE) struct device dev; #endif }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); #endif xenbus_probe_backend.c000066400000000000000000000164741314037446600341750ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE) return -ENOSPC; return 0; } static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, }, .dev = { .bus_id = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } int __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend, owner, mod_name); } EXPORT_SYMBOL_GPL(__xenbus_register_backend); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if 
(err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); xenbus_xs.c000066400000000000000000000520111314037446600320340ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.27/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). 
*/ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct rw_semaphore transaction_mutex; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) down_read(&xs_state.transaction_mutex); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) up_read(&xs_state.transaction_mutex); return ret; } /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. 
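 * A request is an xsd_sockmsg header whose len field covers the payload
 * vectors that follow; header and payload are written to the ring under
 * request_mutex, and the matching reply body is handed back to the caller.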
 */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	/* Must stay consistent with the xsd_sockmsg_type values in xs_wire.h. */
#define XS_LINUX_WEAK_WRITE 128

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type && type != XS_LINUX_WEAK_WRITE) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "XENBUS unexpected type [%d], expected [%d]\n",
			       msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}

/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
	else
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len) + 1;

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;
	ret[*num] = strings + len;

	return ret;
}

char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call free() on it after use.
 * len indicates length in bytes.
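 *
 * Minimal usage sketch (the "device/vif/0" path and "backend" key are only
 * illustrative xenstore names):
 *
 *     unsigned int len;
 *     char *val = xenbus_read(XBT_NIL, "device/vif/0", "backend", &len);
 *
 *     if (!IS_ERR(val)) {
 *         pr_debug("backend is %s\n", val);
 *         kfree(val);
 *     }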
*/ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; down_read(&xs_state.transaction_mutex); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { up_read(&xs_state.transaction_mutex); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); up_read(&xs_state.transaction_mutex); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. 
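 *
 * For example, read_otherend_details() in xenbus_probe.c (called with the
 * frontend's "backend-id"/"backend" nodes) boils down to:
 *
 *     err = xenbus_gather(XBT_NIL, xendev->nodename,
 *                         "backend-id", "%i", &xendev->otherend_id,
 *                         "backend", NULL, &xendev->otherend,
 *                         NULL);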
*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { down_write(&xs_state.transaction_mutex); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.transaction_mutex); /* No need for watches_lock: the watch_mutex is sufficient. 
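 * xs_suspend() took watch_mutex for writing, so no watch can be registered
 * or unregistered while the list is re-walked here; xs_resume() releases the
 * mutex once every watch has been re-armed in xenstored.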
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); up_write(&xs_state.transaction_mutex); } #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
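 * Drop the response mutex and retry: the loop above goes back to
 * xb_wait_for_data_to_read(), and we only break out once data is still
 * present after re-acquiring the mutex.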
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { int err; struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); init_rwsem(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) return err; task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/000077500000000000000000000000001314037446600277265ustar00rootroot00000000000000platform-pci.c000066400000000000000000000237351314037446600324220ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
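 * (Masking _PAGE_NX out of __PAGE_KERNEL below keeps the stub pages
 * executable, which is what vmalloc_exec() would otherwise have provided.)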
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n", mmio_addr, mmio_len); return -EBUSY; } if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n", iolen, ioaddr); release_mem_region(mmio_addr, mmio_len); return -EBUSY; } platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; /* write 
flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { release_mem_region(mmio_addr, mmio_len); release_region(ioaddr, iolen); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/reboot.c000066400000000000000000000177031314037446600313740ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
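 * Recorded by xen_resume_notifier(); when the previous suspend was
 * cancelled, xen_suspend() skips re-creating the suspend event channel.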
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
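 * (XENBUS_IS_ERR_READ() covers both an IS_ERR() pointer and an empty
 * string, so in either case we just end the transaction below.)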
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xenbus_backend_client.c000066400000000000000000000111611314037446600343240ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. 
* * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(op.status); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); do { if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); msleep(10); } while(op.status == GNTST_eagain); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; 
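	/* Undo a xenbus_map_ring() mapping: unmap the page at vaddr using the
	 * grant handle returned by the original map operation. */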
gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); xenbus_client.c000066400000000000000000000265631314037446600326710ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. 
On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif /** * xenbus_switch_state * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not work inside a Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. 
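 *
 * Typical use is therefore outside any transaction, e.g. (illustrative
 * only, not from this file):
 *
 *	err = xenbus_switch_state(dev, XenbusStateConnected);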
*/ int current_state; int err; if (state == dev->state) return 0; err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", ¤t_state); if (err != 1) return 0; err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); if (err) { if (state != XenbusStateClosing) /* Avoid looping */ xenbus_dev_fatal(dev, err, "writing new state"); return err; } dev->state = state; return 0; } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { int ret; unsigned int len; char *printf_buffer = NULL, *path_buffer = NULL; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (printf_buffer == NULL) goto fail; len = sprintf(printf_buffer, "%i ", -err); ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap); BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (path_buffer == NULL) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) { dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); goto fail; } fail: kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. 
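 *
 * A minimal sketch of typical frontend use (the "event-channel" key name is
 * an illustrative assumption, not taken from this file):
 *
 *	int port, err;
 *	err = xenbus_alloc_evtchn(dev, &port);
 *	if (!err)
 *		err = xenbus_printf(XBT_NIL, dev->nodename,
 *				    "event-channel", "%u", port);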
*/ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); xenbus_comms.c000066400000000000000000000157351314037446600325300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; extern void xenbus_probe(struct work_struct *); static DECLARE_WORK(probe_work, xenbus_probe); static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. 
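 * (The rmb() below pairs with the write barrier the producer issues before
 * advancing rsp_prod, so the payload is guaranteed to be visible before we
 * copy it out.)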
*/ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) printk(KERN_ERR "XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { printk(KERN_WARNING "XENBUS response ring is not quiescent " "(%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { printk(KERN_ERR "XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } xenbus_comms.h000066400000000000000000000043301314037446600325220ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. 
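 * xb_read()/xb_write() move raw bytes across the shared xenstore ring and
 * kick the event channel; message framing, reply matching and watch
 * dispatch are handled by the higher-level xs_* routines.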
*/ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #endif /* _XENBUS_COMMS_H */ xenbus_dev.c000066400000000000000000000253341314037446600321640ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. 
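 * Replies and watch events destined for this open file are appended here as
 * struct read_buffer entries by queue_reply() and drained by
 * xenbus_dev_read().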
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); 
watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; } out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef HAVE_UNLOCKED_IOCTL static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { extern int xenbus_conn(domid_t remote_dom, int *grant_ref, evtchn_port_t *local_port); void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, 
XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, #ifdef HAVE_UNLOCKED_IOCTL .unlocked_ioctl = xenbus_dev_ioctl #endif }; int xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } xenbus_probe.c000066400000000000000000001123431314037446600325120ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #include #ifdef MODULE #include #endif #else #include #include #include #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xen_store_evtchn); #endif struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static BLOCKING_NOTIFIER_HEAD(xenstore_chain); static void wait_for_devices(struct xenbus_driver *xendrv); static int xenbus_probe_frontend(const char *type, const char *name); static void xenbus_dev_shutdown(struct device *_dev); #if !defined(CONFIG_XEN) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state); static int xenbus_dev_resume(struct device *dev); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. 
" "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } static int read_backend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "backend-id", "backend"); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ #if defined(CONFIG_XEN) || defined(MODULE) add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); #endif add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype); return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif #if !defined(CONFIG_XEN) && !defined(MODULE) .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, #endif }, #if defined(CONFIG_XEN) || defined(MODULE) .dev = { .init_name = "xen", }, #endif }; static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { struct xen_bus_type *bus = bus; bus = container_of(dev->dev.bus, struct xen_bus_type, bus); /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. 
*/ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); return -ENODEV; } int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } static void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = owner; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) drv->driver.mod_name = mod_name; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = 
read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } /* device// */ static int xenbus_probe_frontend(const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(&xenbus_frontend, type, nodename); kfree(nodename); return err; } static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... 
*/ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL_GPL(xenbus_dev_changed); #endif static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; #if !defined(CONFIG_XEN) && !defined(MODULE) static int xenbus_dev_suspend(struct device *dev, pm_message_t state) #else static int suspend_dev(struct device *dev, void *data) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend) #if !defined(CONFIG_XEN) && !defined(MODULE) err = drv->suspend(xdev, state); #else err = drv->suspend(xdev); #endif if (err) printk(KERN_WARNING "xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } #if defined(CONFIG_XEN) || defined(MODULE) static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) printk(KERN_WARNING "xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } #endif #if !defined(CONFIG_XEN) && !defined(MODULE) static int xenbus_dev_resume(struct device *dev) #else static int resume_dev(struct device *dev, void *data) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); err = talk_to_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { printk(KERN_WARNING "xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { printk(KERN_WARNING "xenbus_probe: resume (watch_otherend) %s failed: " "%d.\n", dev_name(dev), err); return err; } return 0; } #if defined(CONFIG_XEN) || defined(MODULE) void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } EXPORT_SYMBOL_GPL(xenbus_suspend); void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } EXPORT_SYMBOL_GPL(xenbus_resume); void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } 
EXPORT_SYMBOL_GPL(xenbus_suspend_cancel); #endif /* A flag to determine if xenstored is 'ready' (i.e. has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #if defined(VRM) static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); printk(KERN_EMERG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) printk(KERN_EMERG "XENBUS: backend %s timed out.\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. */ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_EMERG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; printk(KERN_EMERG "XENBUS: triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); printk(KERN_EMERG "XENBUS: reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_EMERG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = 
xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n); if (IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } #endif void xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); #if defined(VRM) printk(KERN_EMERG "%s: xenbus_reset_state.\n", __func__); /* reset devices in Connected or Closed state */ xenbus_reset_state(); #endif /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xb_free_port(evtchn_port_t port) { struct evtchn_close close; close.port = port; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); #endif rc = xb_free_port(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = xb_free_port(xen_store_evtchn); if (rc2 != 0) printk(KERN_WARNING "XENBUS: Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } #endif #ifndef MODULE static int __init xenbus_probe_init(void) #else static int __devinit xenbus_probe_init(void) #endif { int err = 0; #if defined(CONFIG_XEN) || defined(MODULE) unsigned long page = 0; #endif DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = 
bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) printk(KERN_WARNING "XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { #if defined(CONFIG_XEN) || defined(MODULE) struct evtchn_alloc_unbound alloc_unbound; /* Allocate page. */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif #else /* dom0 not yet supported */ #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #else xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN); xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN); xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { printk(KERN_WARNING "XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); printk(KERN_WARNING "XENBUS: Error registering frontend device: %i\n", xenbus_frontend.error); } } #endif xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: #if defined(CONFIG_XEN) || defined(MODULE) if (page) free_page(page); #endif /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
*/ return err; } #ifndef MODULE postcore_initcall(xenbus_probe_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #else int __devinit xenbus_init(void) { return xenbus_probe_init(); } #endif static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) printk(KERN_WARNING "XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; } if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); xenbus_probe.h000066400000000000000000000070021314037446600325120ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(const char *type, const char *dir); struct bus_type bus; #if defined(CONFIG_XEN) || defined(MODULE) struct device dev; #endif }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); #endif xenbus_probe_backend.c000066400000000000000000000167341314037446600341700ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env); static int xenbus_probe_backend(const char *type, const char *domid); extern int read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); static int read_frontend_details(struct xenbus_device *xendev) { return read_otherend_details(xendev, "frontend-id", "frontend"); } /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static struct device_attribute xenbus_backend_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, // .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_backend, .dev_attrs = xenbus_backend_attrs, }, .dev = { .init_name = "xen-backend", }, }; static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype); add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename); add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root); if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } int __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend, owner, mod_name); } EXPORT_SYMBOL_GPL(__xenbus_register_backend); /* backend/// */ static int xenbus_probe_backend_unit(const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(&xenbus_backend, type, nodename); kfree(nodename); return err; } /* backend// */ static int 
xenbus_probe_backend(const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_probe_and_watch(void) { xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); } void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) printk(KERN_WARNING "XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); printk(KERN_WARNING "XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); xenbus_xs.c000066400000000000000000000536721314037446600320460ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/2.6.32/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { printk(KERN_WARNING "XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; struct xsd_sockmsg req_msg = *msg; int err; if (req_msg.type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((req_msg.type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; /* Must correspond to the xsd_sockmsg_type definitions in xs_wire.h. */ #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) printk(KERN_WARNING "XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. 
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
*/ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; va_start(ap, fmt); ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap); va_end(ap); BUG_ON(ret > PRINTF_BUFFER_SIZE-1); ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. 
*/ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); /* Ignore errors due to multiple registration. */ if ((err != 0) && (err != -EEXIST)) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) printk(KERN_WARNING "XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. 
kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) printk(KERN_WARNING "XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); return 0; } 
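/*
 * Illustrative sketch (kept under #if 0, not built) of how the primitives
 * above are typically combined by a frontend: read and write nodes inside a
 * transaction, retry on -EAGAIN, then register a watch.  The
 * "device/example/0" path, the node names, the callback and the evtchn
 * parameter are placeholders, not part of this driver.
 */
#if 0
static void example_backend_changed(struct xenbus_watch *watch,
				    const char **vec, unsigned int len)
{
	/* vec[] holds the path and token strings of the triggering event. */
}

static struct xenbus_watch example_watch = {
	.node = "device/example/0/backend-state",
	.callback = example_backend_changed,
};

static int example_xenbus_usage(unsigned int evtchn)
{
	struct xenbus_transaction xbt;
	int backend_id, err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	/* Single read + scanf of a node below the given directory. */
	err = xenbus_scanf(xbt, "device/example/0", "backend-id",
			   "%d", &backend_id);
	if (err < 0) {
		xenbus_transaction_end(xbt, 1);
		return err;
	}

	/* Single printf + write of a node below the given directory. */
	err = xenbus_printf(xbt, "device/example/0", "event-channel",
			    "%u", evtchn);
	if (err) {
		xenbus_transaction_end(xbt, 1);
		return err;
	}

	/* Commit; a conflicting concurrent transaction yields -EAGAIN. */
	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		return err;

	return register_xenbus_watch(&example_watch);
}
#endif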
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/000077500000000000000000000000001314037446600277765ustar00rootroot00000000000000features.c000066400000000000000000000016041314037446600317020ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #ifdef CONFIG_PARAVIRT_XEN #include #else #include #endif #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include #include #include u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; EXPORT_SYMBOL(xen_features); void xen_setup_features(void) { struct xen_feature_info fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) xen_features[i * 32 + j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_v1_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry_v1 *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if 
(unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) return 0; } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); return 1; } int gnttab_end_foreign_access_ref(grant_ref_t ref) { if (_gnttab_end_foreign_access_ref(ref)) return 1; printk(KERN_DEBUG "WARNING: g.e. %#x still in use!\n", ref); return 0; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); struct deferred_entry { struct list_head list; grant_ref_t ref; uint16_t warn_delay; struct page *page; }; static LIST_HEAD(deferred_list); static void gnttab_handle_deferred(unsigned long); static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); static void gnttab_handle_deferred(unsigned long unused) { unsigned int nr = 10; struct deferred_entry *first = NULL; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); while (nr--) { struct deferred_entry *entry = list_first_entry(&deferred_list, struct deferred_entry, list); if (entry == first) break; list_del(&entry->list); spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref)) { put_free_entry(entry->ref); if (entry->page) { printk(KERN_DEBUG "freeing g.e. %#x (pfn %#lx)\n", entry->ref, page_to_pfn(entry->page)); __free_page(entry->page); } else printk(KERN_DEBUG "freeing g.e. %#x\n", entry->ref); kfree(entry); entry = NULL; } else { if (!--entry->warn_delay) pr_info("g.e. %#x still pending\n", entry->ref); if (!first) first = entry; } spin_lock_irqsave(&gnttab_list_lock, flags); if (entry) list_add_tail(&entry->list, &deferred_list); else if (list_empty(&deferred_list)) break; } if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); } static void gnttab_add_deferred(grant_ref_t ref, struct page *page) { struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); const char *what = KERN_WARNING "leaking"; if (entry) { unsigned long flags; entry->ref = ref; entry->page = page; entry->warn_delay = 60; spin_lock_irqsave(&gnttab_list_lock, flags); list_add_tail(&entry->list, &deferred_list); if (!timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); what = KERN_DEBUG "deferring"; } printk("%s g.e. %#x (pfn %lx)\n", what, ref, page ? page_to_pfn(page) : -1); } void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else gnttab_add_deferred(ref, page ? 
virt_to_page(page) : NULL); } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); void gnttab_multi_end_foreign_access(unsigned int nr, grant_ref_t refs[], struct page *pages[]) { for (; nr--; ++refs) { if (*refs != GRANT_INVALID_REF) { if (gnttab_end_foreign_access_ref(*refs)) put_free_entry(*refs); else if (pages) { gnttab_add_deferred(*refs, *pages); *pages = NULL; } else gnttab_add_deferred(*refs, NULL); *refs = GRANT_INVALID_REF; } if (pages) { if (*pages) { __free_page(*pages); *pages = NULL; } ++pages; } } } EXPORT_SYMBOL_GPL(gnttab_multi_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. */ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; 
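	/*
	 * Queue @callback to be invoked once at least @count grant
	 * references are free; a callback that is already queued is
	 * left unchanged.
	 */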
spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(xen_pfn_t *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; xen_pfn_t *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(*frames), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status != GNTST_okay); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = 
apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) static DEFINE_SEQLOCK(gnttab_dma_lock); static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); ClearPageReserved(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; struct page *page, *new_page; void *addr, *new_addr; unsigned long pfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); copy_page(new_addr, addr); pfn = page_to_pfn(page); new_mfn = virt_to_mfn(new_addr); write_seqlock_bh(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? */ if (unlikely(page_mapped(page))) { write_sequnlock_bh(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); if (!xen_feature(XENFEAT_auto_translated_physmap)) { multicall_entry_t mc[2]; mmu_update_t mmu; set_phys_to_machine(pfn, new_mfn); set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); MULTI_grant_table_op(&mc[0], GNTTABOP_unmap_and_replace, &unmap, 1); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; MULTI_mmu_update(&mc[1], &mmu, 1, NULL, DOMID_SELF); err = HYPERVISOR_multicall_check(mc, 2, NULL); } else err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status != GNTST_okay); write_sequnlock_bh(&gnttab_dma_lock); new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); if (PageReserved(page)) SetPageReserved(new_page); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; BUG_ON(PageCompound(page)); do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. 
*/ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #endif /* CONFIG_XEN_BACKEND */ #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. #endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return 0; return gnttab_map(0, nr_grant_frames - 1); } #ifdef CONFIG_PM_SLEEP #include #ifdef CONFIG_X86 static int gnttab_suspend(void) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static void _gnttab_resume(void) { if (gnttab_resume()) BUG(); } static struct syscore_ops gnttab_syscore_ops = { .resume = _gnttab_resume, .suspend = gnttab_suspend, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { pr_warning("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } #ifdef CONFIG_XEN static int __init #else int __devinit #endif gnttab_init(void) { int i, ret; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) { ret = -ENOMEM; goto ini_nomem; } } if (gnttab_resume() < 0) { ret = -ENODEV; goto ini_nomem; } nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_SPECIAL) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) register_syscore_ops(&gnttab_syscore_ops); #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return ret; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif platform-pci.c000066400000000000000000000320551314037446600324650ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. 
* * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); /* NB. [aux-]ide-disks options do not unplug IDE CD-ROM drives. */ /* NB. aux-ide-disks is equiv to ide-disks except ignores primary master. */ static char *dev_unplug; module_param(dev_unplug, charp, 0644); MODULE_PARM_DESC(dev_unplug, "Emulated devices to unplug: " "[all,][ide-disks,][aux-ide-disks,][nics]\n"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /*driver should write xenstore flag to tell xen which feature supported, add by w00205029 2012-01-17*/ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; 
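		/* EBX, ECX and EDX carry the 12-byte hypervisor signature. */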
*(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } #endif static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. * hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. 
*/ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff /* NB: register a proper one */ #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 static int check_platform_magic(struct device *dev, long ioaddr, long iolen) { short magic, unplug = 0; char protocol, *p, *q, *err; /* Unconditionally unplug everything */ if (!dev_unplug) unplug = UNPLUG_ALL; for (p = dev_unplug; p; p = q) { q = strchr(dev_unplug, ','); if (q) *q++ = '\0'; if (!strcmp(p, "all")) unplug |= UNPLUG_ALL; else if (!strcmp(p, "ide-disks")) unplug |= UNPLUG_ALL_IDE_DISKS; else if (!strcmp(p, "aux-ide-disks")) unplug |= UNPLUG_AUX_IDE_DISKS; else if (!strcmp(p, "nics")) unplug |= UNPLUG_ALL_NICS; else if (!strcmp(p, "never")) unplug = 0; else dev_warn(dev, "unrecognised option '%s' " "in module parameter 'dev_unplug'\n", p); } if (iolen < 0x16) { err = "backend too old"; goto no_dev; } magic = inw(XEN_IOPORT_MAGIC); if (magic != XEN_IOPORT_MAGIC_VAL) { err = "unrecognised magic value"; goto no_dev; } protocol = inb(XEN_IOPORT_PROTOVER); dev_info(dev, "I/O protocol version %d\n", protocol); switch (protocol) { case 1: outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM); outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER); if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) { dev_err(dev, "blacklisted by host\n"); return -ENODEV; } /* Fall through */ case 0: outw(unplug, XEN_IOPORT_UNPLUG); break; default: err = "unknown I/O protocol version"; goto no_dev; } return 0; no_dev: dev_warn(dev, "failed backend handshake: %s\n", err); if (!unplug) return 0; dev_err(dev, "failed to execute specified dev_unplug options!\n"); return -ENODEV; } #ifdef HAVE_OLDMEM_PFN_IS_RAM static int xen_oldmem_pfn_is_ram(unsigned long pfn) { struct xen_hvm_get_mem_type a; int ret; a.domid = DOMID_SELF; a.pfn = pfn; if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) return -ENXIO; switch (a.mem_type) { case HVMMEM_mmio_dm: ret = 0; break; case HVMMEM_ram_rw: case HVMMEM_ram_ro: default: ret = 1; break; } return ret; } #endif void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return 
i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } ret = pci_request_region(pdev, 1, DRV_NAME); if (ret < 0) return ret; ret = pci_request_region(pdev, 0, DRV_NAME); if (ret < 0) goto mem_out; platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; ret = check_platform_magic(&pdev->dev, ioaddr, iolen); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret = xen_panic_handler_init())) goto out; write_feature_flag(); #ifdef HAVE_OLDMEM_PFN_IS_RAM register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram); #endif out: if (ret) { pci_release_region(pdev, 0); mem_out: pci_release_region(pdev, 1); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); MODULE_VERSION("2.2.0.202"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/reboot.c000066400000000000000000000175431314037446600314460ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? 
*/ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? 
xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { pr_warning("Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) //ctrl_alt_del(); xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; 
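	/* The shutdown watcher is set up once xenstore becomes available. */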
register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xen_proc.c000066400000000000000000000011511314037446600316760ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101#include #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry * #ifndef MODULE __init #endif create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", NULL)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } #ifdef MODULE EXPORT_SYMBOL_GPL(create_xen_proc_entry); #elif defined(CONFIG_XEN_PRIVILEGED_GUEST) void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } #endif xenbus_backend_client.c000066400000000000000000000102331314037446600343730ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area, unsigned int nr, grant_handle_t handles[]) { unsigned int i; int err = 0; for (i = 0; i < nr; ++i) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, handles[i]); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) continue; xenbus_dev_error(dev, op.status, "unmapping page %u (handle %#x)", i, handles[i]); err = -EINVAL; } if (!err) { free_vm_area(area); kfree(handles); } return err; } /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, const grant_ref_t refs[], unsigned int nr) { grant_handle_t *handles = kmalloc(nr * sizeof(*handles), GFP_KERNEL); struct vm_struct *area; unsigned int i; if (!handles) return ERR_PTR(-ENOMEM); area = alloc_vm_area(nr * PAGE_SIZE); if (!area) { kfree(handles); return ERR_PTR(-ENOMEM); } for (i = 0; i < nr; ++i) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, refs[i], dev->otherend_id); gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); if (op.status == GNTST_okay) { handles[i] = op.handle; continue; } unmap_ring_vfree(dev, area, i, handles); xenbus_dev_fatal(dev, op.status, "mapping page %u (ref %#x, dom%d)", i, refs[i], dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(-EINVAL); } /* Stuff the handle array in an unused field. */ area->phys_addr = (unsigned long)handles; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { return unmap_ring_vfree(dev, area, get_vm_area_size(area) >> PAGE_SHIFT, (void *)(unsigned long)area->phys_addr); } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); xenbus_client.c000066400000000000000000000306011314037446600327250ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. 
*/ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif static void xenbus_switch_fatal(struct xenbus_device *, int, int, const char *, ...); static int __xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state, int depth) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not take a caller's Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. 
 */
	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
		       va_list *ap)
{
	char *printf_buffer, *path_buffer;
	struct va_format vaf = { .fmt = fmt, .va = ap };

	printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf);
	if (printf_buffer)
		dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);
	if (!printf_buffer || !path_buffer
	    || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
		dev_err(&dev->dev,
			"xenbus: failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	_dev_error(dev, err, fmt, &ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	_dev_error(dev, err, fmt, &ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); if (!depth) __xenbus_switch_state(dev, XenbusStateClosing, 1); } /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_multi_grant_ring(struct xenbus_device *dev, unsigned int nr, struct page *pages[], grant_ref_t refs[]) { unsigned int i; int err = -EINVAL; for (i = 0; i < nr; ++i) { unsigned long pfn = page_to_pfn(pages[i]); err = gnttab_grant_foreign_access(dev->otherend_id, pfn_to_mfn(pfn), 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page #%u", i); break; } refs[i] = err; } return err; } EXPORT_SYMBOL_GPL(xenbus_multi_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { int result; if (xenbus_scanf(XBT_NIL, path, "state", "%d", &result) != 1) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); xenbus_comms.c000066400000000000000000000157311314037446600325740ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. 
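 *
 * Worked example (illustrative, assuming XENSTORE_RING_SIZE is 1024): with
 * req_cons == 20 and req_prod == 1030 the request ring still holds 1010
 * unconsumed bytes, so xb_write() may queue at most 14 more bytes, starting
 * at offset MASK_XENSTORE_IDX(1030) == 6 of the req array.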
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { #ifdef CONFIG_XEN_PRIVILEGED_GUEST static DECLARE_WORK(probe_work, xenbus_probe); int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: #endif wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. 
*/ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) pr_err("XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { pr_warning("XENBUS response ring is not quiescent" " (%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); /* breaks kdump */ if (!reset_devices) intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } xenbus_comms.h000066400000000000000000000046661314037446600326060ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/* * Private include for xenbus communications. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #if defined(CONFIG_XEN_XENBUS_DEV) && defined(CONFIG_XEN_PRIVILEGED_GUEST) #include #include int xenbus_conn(domid_t, grant_ref_t *, evtchn_port_t *); #endif #endif /* _XENBUS_COMMS_H */ xenbus_dev.c000066400000000000000000000270301314037446600322270ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
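 *
 * Illustrative sketch of a user-space request (reading the node "domid" is
 * just an example; the wire format is a struct xsd_sockmsg header followed
 * by msg.len payload bytes, and partial writes are accumulated):
 *
 *	struct xsd_sockmsg msg = {
 *		.type = XS_READ,
 *		.req_id = 1,
 *		.tx_id = 0,
 *		.len = sizeof("domid"),
 *	};
 *	write(fd, &msg, sizeof(msg));
 *	write(fd, "domid", sizeof("domid"));
 *
 * The reply then comes back from read() as another xsd_sockmsg header
 * followed by its payload.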
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. 
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if (len > sizeof(u->u.buffer) - u->len) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = 
u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.node = kstrdup(path, GFP_KERNEL); watch->watch.callback = watch_fired; watch->token = kstrdup(token, GFP_KERNEL); watch->dev_data = u; err = watch->watch.node && watch->token ? register_xenbus_watch(&watch->watch) : -ENOMEM; if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { if (u->u.msg.type == XS_ERROR) kfree(trans); else { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } } else if (u->u.msg.type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long 
xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; int #ifndef MODULE __init #else __devinit #endif xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } xenbus_probe.c000066400000000000000000001130171314037446600325610ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL(sym) __typeof__(sym) sym #else #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL EXPORT_SYMBOL_GPL #endif #ifndef CONFIG_XEN #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; PARAVIRT_EXPORT_SYMBOL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; PARAVIRT_EXPORT_SYMBOL(xen_store_interface); static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static #ifdef CONFIG_XEN_UNPRIVILEGED_GUEST __initdata #endif BLOCKING_NOTIFIER_HEAD(xenstore_chain); #if defined(CONFIG_XEN) || defined(MODULE) static void wait_for_devices(struct xenbus_driver *xendrv); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } PARAVIRT_EXPORT_SYMBOL(xenbus_match); static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_read_otherend_details); #if defined(CONFIG_XEN) || defined(MODULE) static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) #else /* !CONFIG_XEN && !MODULE */ void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown) #endif /* CONFIG_XEN || MODULE */ { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ # if defined(CONFIG_XEN) || defined(MODULE) const struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); int ignore_on_shutdown = (bus->levels == 2); # endif if (ignore_on_shutdown && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } PARAVIRT_EXPORT_SYMBOL(xenbus_otherend_changed); static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, bus->otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); #if defined(CONFIG_XEN) || defined(MODULE) return -ENODEV; #else return err; #endif } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_probe); int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); if (drv->remove) drv->remove(dev); free_otherend_details(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_remove); void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); 
goto out; } xenbus_switch_state(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } PARAVIRT_EXPORT_SYMBOL(xenbus_register_driver_common); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
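	   For example, "device/vif/1" must match "device/vif/1/0" but must
	   not match "device/vif/10".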
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. 
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_node); #if defined(CONFIG_XEN) || defined(MODULE) /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { pr_warning("XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { pr_warning("XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype) || add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename) || add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype)) return -ENOMEM; return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif }, .dev = { .init_name = "xen", }, }; int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. 
*/ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); #endif static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(bus, type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_devices); static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_changed); #if defined(CONFIG_XEN) || defined(MODULE) static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int __maybe_unused suspend_dev(struct device *dev, void *data) #else int xenbus_dev_suspend(struct device *dev) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); if (drv->suspend) err = drv->suspend(xdev); if (err) pr_warning("xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend); #if defined(CONFIG_XEN) || defined(MODULE) static int __maybe_unused suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) pr_warning("xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } static int __maybe_unused resume_dev(struct device *dev, void *data) #else int xenbus_dev_resume(struct device *dev) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { pr_warning("xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { pr_warning("xenbus_probe: resume (watch_otherend) %s failed:" " %d\n", dev_name(dev), err); return err; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_resume); #if !defined(CONFIG_XEN) && !defined(MODULE) int xenbus_dev_cancel(struct device *dev) { /* Do nothing */ DPRINTK("cancel"); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_cancel); #elif defined(CONFIG_PM_SLEEP) || defined(MODULE) void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } #endif /* CONFIG_PM_SLEEP || MODULE */ /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int #ifdef CONFIG_XEN __init #endif register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } #ifndef CONFIG_XEN EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #endif #ifndef CONFIG_XEN static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state) != 1) backend_state = XenbusStateUnknown; printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) pr_info("XENBUS: backend %s timed out.\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. */ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_DEBUG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; pr_info("XENBUS: triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); pr_info("XENBUS: reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_DEBUG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = xenbus_directory(XBT_NIL, "device", 
devclass[i], &dev_n); if (IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } #endif void #if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) __init #elif defined(MODULE) __devinit #endif xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); #ifndef CONFIG_XEN /* reset devices in Connected or Closed state */ xenbus_reset_state(); #endif #if defined(CONFIG_XEN) || defined(MODULE) /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); #endif /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } PARAVIRT_EXPORT_SYMBOL(xenbus_probe); #if !defined(CONFIG_XEN) && !defined(MODULE) static int __init xenbus_probe_initcall(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain() || xen_hvm_domain()) return 0; xenbus_probe(NULL); return 0; } device_initcall(xenbus_probe_initcall); #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST #ifdef CONFIG_PROC_FS static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #ifdef CONFIG_XEN_XENBUS_DEV int xenbus_conn(domid_t remote_dom, grant_ref_t *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); rc = close_evtchn(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = close_evtchn(xen_store_evtchn); if (rc2 != 0) pr_warning("XENBUS: Error freeing xenstore event channel:" " %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } #endif #endif /* CONFIG_XEN_PRIVILEGED_GUEST */ #ifndef MODULE static int __init #else int __devinit #endif xenbus_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN) || defined(MODULE) 
/* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) pr_warning("XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); #endif /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate Xenstore page */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain()) { #endif #ifndef CONFIG_XEN uint64_t v = 0; err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto err; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto err; xen_store_mfn = (unsigned long)v; xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } else { #endif #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } #endif atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { pr_warning("XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); pr_warning("XENBUS: Error registering frontend device:" " %d\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #endif #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
*/ if (page != 0) free_page(page); return err; } #ifndef MODULE postcore_initcall(xenbus_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #endif #if defined(CONFIG_XEN) || defined(MODULE) static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ pr_info("XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); pr_warning("XENBUS: Timeout connecting to device: %s" " (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) pr_warning("XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) pr_warning("XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; pr_cont("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) pr_cont("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; #endif if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); #endif /* CONFIG_XEN || MODULE */ xenbus_probe.h000066400000000000000000000100531314037446600325620ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, unsigned int len); #else struct device dev; #endif struct bus_type bus; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); extern void xenbus_dev_shutdown(struct device *_dev); extern int xenbus_dev_suspend(struct device *dev); extern int xenbus_dev_resume(struct device *dev); extern int xenbus_dev_cancel(struct device *dev); extern void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown); extern int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); #endif xenbus_probe_backend.c000066400000000000000000000211751314037446600342330ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #ifdef CONFIG_XEN #include #include #endif #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; struct xen_bus_type *bus; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) return -ENOMEM; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } /* backend/// */ static int xenbus_probe_backend_unit(struct xen_bus_type *bus, const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = 
xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } #ifndef CONFIG_XEN static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 0); } #endif static struct device_attribute xenbus_backend_dev_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, #ifndef CONFIG_XEN .otherend_changed = frontend_changed, #else .dev = { .init_name = "xen-backend", }, #endif .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, #ifdef CONFIG_XEN .shutdown = xenbus_dev_shutdown, #endif .dev_attrs = xenbus_backend_dev_attrs, }, }; static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; static int read_frontend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); } #ifndef CONFIG_XEN int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); #endif int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } #endif #ifndef CONFIG_XEN static int backend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) #else void xenbus_backend_probe_and_watch(void) #endif { /* Enumerate devices in xenstore and watch for changes. 
*/ xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); #ifndef CONFIG_XEN return NOTIFY_DONE; #endif } #ifndef CONFIG_XEN static int __init xenbus_probe_backend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = backend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_backend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_backend_init); #else void __init xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) pr_warning("XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void __init xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); pr_warning("XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); #endif xenbus_probe_frontend.c000066400000000000000000000213071314037446600344600ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101#define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } static int xenbus_uevent_frontend(struct device *_dev, struct kobj_uevent_env *env) { struct xenbus_device *dev = to_xenbus_device(_dev); if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) return -ENOMEM; return 0; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 1); } static struct device_attribute xenbus_frontend_dev_attrs[] = { __ATTR_NULL }; static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, }; static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .otherend_changed = backend_changed, .bus = { .name = "xen", .match = xenbus_match, .uevent = xenbus_uevent_frontend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown 
= xenbus_dev_shutdown, .dev_attrs = xenbus_frontend_dev_attrs, .pm = &xenbus_pm_ops, }, }; static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (ignore_nonessential) { /* With older QEMU, for PVonHVM guests the guest config files * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] * which is nonsensical as there is no PV FB (there can be * a PVKB) running as HVM guest. */ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) return 0; if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) return 0; } xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); } static int non_essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, false); } static int exists_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, essential_device_connecting); } static int exists_non_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, non_essential_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); } else if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; static bool wait_loop(unsigned long start, unsigned int max_delay, unsigned int *seconds_waited) { if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { if (!*seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); *seconds_waited += 5; printk("%us...", max_delay - *seconds_waited); if (*seconds_waited == max_delay) return true; } schedule_timeout_interruptible(HZ/10); return false; } /* * On a 5-minute timeout, wait for all devices currently configured. 
We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !xen_domain()) return; while (exists_non_essential_connecting_device(drv)) if (wait_loop(start, 30, &seconds_waited)) break; /* Skips PVKB and PVFB check.*/ while (exists_essential_connecting_device(drv)) if (wait_loop(start, 270, &seconds_waited)) break; if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); static int frontend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) { /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); return NOTIFY_DONE; } static int __init xenbus_probe_frontend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = frontend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_frontend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_frontend_init); #ifndef MODULE static int __init boot_wait_for_devices(void) { if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; } late_initcall(boot_wait_for_devices); #endif MODULE_LICENSE("GPL"); xenbus_xs.c000066400000000000000000000545331314037446600321130ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.101/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. 
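 * Handlers whose watch has XBWF_new_thread set are run in a freshly spawned
 * kthread instead (classic Xen / module builds only), so they may block
 * without stalling delivery of the remaining queued events.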
*/ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warning("XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } #endif void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { pr_warn_ratelimited("XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. 
*/ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). 
*/ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) 
{ va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } static void xs_reset_watches(void) { #ifdef MODULE int err, supported = 0; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warning("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warning("XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. 
*/ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warning("XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int #ifndef MODULE __init #else __devinit #endif xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/000077500000000000000000000000001314037446600277205ustar00rootroot00000000000000features.c000066400000000000000000000016041314037446600316240ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. 
*/ #include #include #include #ifdef CONFIG_PARAVIRT_XEN #include #else #include #endif #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include #include #include u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; EXPORT_SYMBOL(xen_features); void xen_setup_features(void) { struct xen_feature_info fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) xen_features[i * 32 + j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); 
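	/*
	 * The frame and domid stores above must become visible before the
	 * entry is flagged GTF_permit_access below; the wmb() orders the
	 * writes so the peer domain never sees a valid flag paired with a
	 * stale frame.
	 */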
shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); int gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) { printk(KERN_DEBUG "WARNING: g.e. still in use!\n"); return 0; } } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) != flags); return 1; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else { /* XXX This needs to be fixed so that the ref and page are placed on a list to be freed up later. */ printk(KERN_DEBUG "WARNING: leaking g.e. and page still in use!\n"); } } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. 
*/ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; 
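	/*
	 * The new entries were chained onto the free list above, so give any
	 * queued gnttab free callbacks a chance to run now that references
	 * are available again.
	 */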
check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status != GNTST_okay); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) static DEFINE_SEQLOCK(gnttab_dma_lock); static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); ClearPageReserved(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; mmu_update_t mmu; struct page *page; struct page *new_page; void *new_addr; void *addr; paddr_t pfn; maddr_t mfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); copy_page(new_addr, addr); pfn = page_to_pfn(page); mfn = pfn_to_mfn(pfn); new_mfn = virt_to_mfn(new_addr); write_seqlock_bh(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? 
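 * __gnttab_dma_map_page() pins foreign pages for DMA by raising their
 * mapcount, so a mapped page here may be under DMA and must not be
 * replaced; back off with -EBUSY instead.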
*/ if (unlikely(page_mapped(page))) { write_sequnlock_bh(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } if (!xen_feature(XENFEAT_auto_translated_physmap)) set_phys_to_machine(pfn, new_mfn); gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status != GNTST_okay); write_sequnlock_bh(&gnttab_dma_lock); if (!xen_feature(XENFEAT_auto_translated_physmap)) { set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF); BUG_ON(err); } new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); if (PageReserved(page)) SetPageReserved(new_page); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. */ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #endif /* CONFIG_XEN_BACKEND */ #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. 
#endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return 0; return gnttab_map(0, nr_grant_frames - 1); } #ifdef CONFIG_PM_SLEEP #include #ifdef CONFIG_X86 static int gnttab_suspend(void) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static void _gnttab_resume(void) { if (gnttab_resume()) BUG(); } static struct syscore_ops gnttab_syscore_ops = { .resume = _gnttab_resume, .suspend = gnttab_suspend, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. */ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { pr_warning("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } #ifdef CONFIG_XEN static int __init #else int __devinit #endif gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
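 * The free list stores one grant_ref_t per possible grant entry,
 * packed into whole pages, so its size scales with the largest grant
 * table the hypervisor will let us map.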
*/ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) goto ini_nomem; } if (gnttab_resume() < 0) return -ENODEV; nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_UNUSED1) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) register_syscore_ops(&gnttab_syscore_ops); #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return -ENOMEM; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif platform-pci.c000066400000000000000000000306701314037446600324100ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); /* NB. [aux-]ide-disks options do not unplug IDE CD-ROM drives. */ /* NB. aux-ide-disks is equiv to ide-disks except ignores primary master. 
*/ static char *dev_unplug; module_param(dev_unplug, charp, 0644); MODULE_PARM_DESC(dev_unplug, "Emulated devices to unplug: " "[all,][ide-disks,][aux-ide-disks,][nics]\n"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ /*static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; }*/ static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff /* NB: register a proper one */ #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 static int check_platform_magic(struct device *dev, long ioaddr, long iolen) { short magic, unplug = 0; char protocol, *p, *q, *err; for (p = dev_unplug; p; p = q) { q = strchr(dev_unplug, ','); if (q) *q++ = '\0'; if (!strcmp(p, "all")) unplug |= UNPLUG_ALL; else if (!strcmp(p, "ide-disks")) unplug |= UNPLUG_ALL_IDE_DISKS; else if (!strcmp(p, "aux-ide-disks")) unplug |= UNPLUG_AUX_IDE_DISKS; else if (!strcmp(p, "nics")) unplug |= UNPLUG_ALL_NICS; else dev_warn(dev, "unrecognised option '%s' " "in module parameter 'dev_unplug'\n", p); } if (iolen < 
0x16) { err = "backend too old"; goto no_dev; } magic = inw(XEN_IOPORT_MAGIC); if (magic != XEN_IOPORT_MAGIC_VAL) { err = "unrecognised magic value"; goto no_dev; } protocol = inb(XEN_IOPORT_PROTOVER); dev_info(dev, "I/O protocol version %d\n", protocol); switch (protocol) { case 1: outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM); outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER); if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) { dev_err(dev, "blacklisted by host\n"); return -ENODEV; } /* Fall through */ case 0: outw(unplug, XEN_IOPORT_UNPLUG); break; default: err = "unknown I/O protocol version"; goto no_dev; } return 0; no_dev: dev_warn(dev, "failed backend handshake: %s\n", err); if (!unplug) return 0; dev_err(dev, "failed to execute specified dev_unplug options!\n"); return -ENODEV; } void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } ret = pci_request_region(pdev, 1, DRV_NAME); if (ret < 0) return ret; ret = pci_request_region(pdev, 0, DRV_NAME); if (ret < 0) goto mem_out; platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; ret = check_platform_magic(&pdev->dev, ioaddr, iolen); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret = xen_panic_handler_init())) goto out; /* write flag to xenstore when xenbus is available */ write_feature_flag(); out: if (ret) { pci_release_region(pdev, 0); mem_out: pci_release_region(pdev, 1); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); 
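The exports above are the foundation the PV frontend drivers in this package build on. As a rough usage sketch of the grant-table batch API from earlier in this section (the function and variable names below are illustrative and not part of this tree, and the header paths are assumed), a frontend that wants to share several ring pages with its backend can reserve the references up front and then claim them one at a time without retaking gnttab_list_lock per page:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/gnttab.h>	/* header path assumed for this driver layout */

/*
 * Illustrative only: reserve 'n' grant references, grant the backend
 * read/write access to each page, and record the references so the
 * caller can revoke them on teardown.
 */
static int example_grant_ring_pages(domid_t backend_id, struct page **pages,
				    unsigned int n, grant_ref_t *refs)
{
	grant_ref_t head;
	unsigned int i;
	int err, ref;

	err = gnttab_alloc_grant_references(n, &head);
	if (err)
		return err;		/* -ENOSPC: grant table exhausted */

	for (i = 0; i < n; i++) {
		ref = gnttab_claim_grant_reference(&head);
		BUG_ON(ref < 0);	/* cannot fail: n entries were reserved */
		gnttab_grant_foreign_access_ref(ref, backend_id,
				pfn_to_mfn(page_to_pfn(pages[i])),
				0 /* read/write */);
		refs[i] = ref;
	}
	gnttab_free_grant_references(head);	/* no-op here: every entry was claimed */
	return 0;
}

On teardown such a driver would call gnttab_end_foreign_access_ref() on each saved reference (or gnttab_end_foreign_access() to also free the page) and then gnttab_free_grant_reference() to return the entry to the free list; xenbus_grant_ring() later in this section is the single-page convenience wrapper over the same primitives.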
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/reboot.c000066400000000000000000000176241314037446600313700ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). 
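 * If the old state was SHUTDOWN_RESUMING we are racing with xen_suspend()
 * as it unwinds; its cmpxchg loop notices the new request and either
 * suspends again or schedules the delayed work itself, so we must not
 * queue it twice.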
*/ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { pr_warning("Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct 
notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xen_proc.c000066400000000000000000000011511314037446600316200ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13#include #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry * #ifndef MODULE __init #endif create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", NULL)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } #ifdef MODULE EXPORT_SYMBOL_GPL(create_xen_proc_entry); #elif defined(CONFIG_XEN_PRIVILEGED_GUEST) void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } #endif xenbus_backend_client.c000066400000000000000000000067501314037446600343260ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t gnt_ref) { struct gnttab_map_grant_ref op; struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE); if (!area) return ERR_PTR(-ENOMEM); gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map, gnt_ref, dev->otherend_id); gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); if (op.status != GNTST_okay) { free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(-EINVAL); } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map, (grant_handle_t)area->phys_addr); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status == GNTST_okay ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); xenbus_client.c000066400000000000000000000432161314037446600326550ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) 
{ int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif static void xenbus_switch_fatal(struct xenbus_device *, int, int, const char *, ...); static int __xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state, int depth) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not take a caller's Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ struct xenbus_transaction xbt; int current_state; int err, abort; if (state == dev->state) return 0; again: abort = 1; err = xenbus_transaction_start(&xbt); if (err) { xenbus_switch_fatal(dev, depth, err, "starting transaction"); return 0; } err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state); if (err != 1) goto abort; err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); if (err) { xenbus_switch_fatal(dev, depth, err, "writing new state"); goto abort; } abort = 0; abort: err = xenbus_transaction_end(xbt, abort); if (err) { if (err == -EAGAIN && !abort) goto again; xenbus_switch_fatal(dev, depth, err, "ending transaction"); } else dev->state = state; return 0; } /** * xenbus_switch_state * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { return __xenbus_switch_state(dev, state, 0); } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list *ap) { char *printf_buffer, *path_buffer; struct va_format vaf = { .fmt = fmt, .va = ap }; printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf); if (printf_buffer) dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (!printf_buffer || !path_buffer || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer)) dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. 
*/ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /** * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps * avoiding recursion within xenbus_switch_state. */ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); if (!depth) __xenbus_switch_state(dev, XenbusStateClosing, 1); } /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * Bind to an existing interdomain event channel in another domain. Returns 0 * on success and stores the local port in *port. On error, returns -errno, * switches the device to XenbusStateClosing, and saves the error in XenStore. */ int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = dev->otherend_id; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); if (err) xenbus_dev_fatal(dev, err, "binding to event channel %d from domain %d", remote_port, dev->otherend_id); else *port = bind_interdomain.local_port; return err; } EXPORT_SYMBOL_GPL(xenbus_bind_evtchn); #endif /** * Free an existing event channel. Returns 0 on success or -errno on error. 
*/ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); #if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */ /** * xenbus_map_ring_valloc * @dev: xenbus device * @gnt_ref: grant reference * @vaddr: pointer to address to be filled out by mapping * * Based on Rusty Russell's skeleton driver's map_page. * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; struct vm_struct *area; *vaddr = NULL; area = xen_alloc_vm_area(PAGE_SIZE); if (!area) return -ENOMEM; op.host_addr = (unsigned long)area->addr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xen_free_vm_area(area); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); return op.status; } /* Stuff the handle in an unused field */ area->phys_addr = (unsigned long)op.handle; *vaddr = area->addr; return 0; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /** * xenbus_map_ring * @dev: xenbus device * @gnt_ref: grant reference * @handle: pointer to grant handle to be filled * @vaddr: address to be mapped to * * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op = { .host_addr = (unsigned long)vaddr, .flags = GNTMAP_host_map, .ref = gnt_ref, .dom = dev->otherend_id, }; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /** * xenbus_unmap_ring_vfree * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). 
*/ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) { struct vm_struct *area; struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) * method so that we don't have to muck with vmalloc internals here. * We could force the user to hang on to their struct vm_struct from * xenbus_map_ring_valloc, but these 6 lines considerably simplify * this API. */ read_lock(&vmlist_lock); for (area = vmlist; area != NULL; area = area->next) { if (area->addr == vaddr) break; } read_unlock(&vmlist_lock); if (!area) { xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } op.handle = (grant_handle_t)area->phys_addr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) xen_free_vm_area(area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", (int16_t)area->phys_addr, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); /** * xenbus_unmap_ring * @dev: xenbus device * @handle: grant handle * @vaddr: addr to unmap * * Unmap a page of memory in this domain that was imported from another domain. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, .handle = handle, }; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); #endif /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { int result; if (xenbus_scanf(XBT_NIL, path, "state", "%d", &result) != 1) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); xenbus_comms.c000066400000000000000000000157311314037446600325160ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { #ifdef CONFIG_XEN_PRIVILEGED_GUEST static DECLARE_WORK(probe_work, xenbus_probe); int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: #endif wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. 
*/ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) pr_err("XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { pr_warning("XENBUS response ring is not quiescent" " (%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); /* breaks kdump */ if (!reset_devices) intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } xenbus_comms.h000066400000000000000000000046661314037446600325300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #if defined(CONFIG_XEN_XENBUS_DEV) && defined(CONFIG_XEN_PRIVILEGED_GUEST) #include #include int xenbus_conn(domid_t, grant_ref_t *, evtchn_port_t *); #endif #endif /* _XENBUS_COMMS_H */ xenbus_dev.c000066400000000000000000000266151314037446600321610ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. */ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t 
*ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); strcpy(watch->token, token); watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { 
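/* Unregister each watch from xenstore before freeing its adapter. */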
unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; int #ifndef MODULE __init #else __devinit #endif xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } xenbus_probe.c000066400000000000000000001133561314037446600325110ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL(sym) __typeof__(sym) sym #else #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL EXPORT_SYMBOL_GPL #endif #ifndef CONFIG_XEN #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; PARAVIRT_EXPORT_SYMBOL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; PARAVIRT_EXPORT_SYMBOL(xen_store_interface); static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static #ifdef CONFIG_XEN_UNPRIVILEGED_GUEST __initdata #endif BLOCKING_NOTIFIER_HEAD(xenstore_chain); #if defined(CONFIG_XEN) || defined(MODULE) static void wait_for_devices(struct xenbus_driver *xendrv); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } PARAVIRT_EXPORT_SYMBOL(xenbus_match); static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. 
" "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_read_otherend_details); #if defined(CONFIG_XEN) || defined(MODULE) static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) #else /* !CONFIG_XEN && !MODULE */ void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown) #endif /* CONFIG_XEN || MODULE */ { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ # if defined(CONFIG_XEN) || defined(MODULE) const struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); int ignore_on_shutdown = (bus->levels == 2); # endif if (ignore_on_shutdown && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } PARAVIRT_EXPORT_SYMBOL(xenbus_otherend_changed); static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, bus->otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); #if defined(CONFIG_XEN) || defined(MODULE) return -ENODEV; #else return err; #endif } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_probe); int 
xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); free_otherend_details(dev); if (drv->remove) drv->remove(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_remove); void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name) { int ret; if (bus->error) return bus->error; drv->driver.name = drv->name; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) drv->driver.owner = owner; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) drv->driver.mod_name = mod_name; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } PARAVIRT_EXPORT_SYMBOL(xenbus_register_driver_common); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
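For example, cleaning up "device/vif" matches "device/vif/0" but not "device/vif2".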
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. 
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_node); #if defined(CONFIG_XEN) || defined(MODULE) /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { pr_warning("XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { pr_warning("XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype) || add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename) || add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype)) return -ENOMEM; return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif }, .dev = { .init_name = "xen", }, }; int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. 
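The wait is a no-op until ready_to_wait_for_devices has been set by boot_wait_for_devices() at late_initcall time.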
*/ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); #endif static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(bus, type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_devices); static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_changed); #if defined(CONFIG_XEN) || defined(MODULE) static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
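fe_watch covers the whole "device" subtree; each fired path is handed to xenbus_dev_changed(), which probes a new node or cleans up a vanished one.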
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int suspend_dev(struct device *dev, void *data) #else int xenbus_dev_suspend(struct device *dev) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); if (drv->suspend) err = drv->suspend(xdev); if (err) pr_warning("xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend); #if defined(CONFIG_XEN) || defined(MODULE) static int suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) pr_warning("xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } static int resume_dev(struct device *dev, void *data) #else int xenbus_dev_resume(struct device *dev) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { pr_warning("xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { pr_warning("xenbus_probe: resume (watch_otherend) %s failed:" " %d\n", dev_name(dev), err); return err; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_resume); #if !defined(CONFIG_XEN) && !defined(MODULE) int xenbus_dev_cancel(struct device *dev) { /* Do nothing */ DPRINTK("cancel"); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_cancel); #else void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } #endif /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int #ifdef CONFIG_XEN __init #endif register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } #ifndef CONFIG_XEN EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #endif #ifndef CONFIG_XEN static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state) != 1) backend_state = XenbusStateUnknown; printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) pr_info("XENBUS: backend %s timed out.\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. */ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_DEBUG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; pr_info("XENBUS: triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); pr_info("XENBUS: reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_DEBUG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = xenbus_directory(XBT_NIL, "device", 
devclass[i], &dev_n); if (IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } #endif void #if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) __init #elif defined(MODULE) __devinit #endif xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); #ifndef CONFIG_XEN /* reset devices in Connected or Closed state */ xenbus_reset_state(); #endif #if defined(CONFIG_XEN) || defined(MODULE) /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); #endif /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } PARAVIRT_EXPORT_SYMBOL(xenbus_probe); #if !defined(CONFIG_XEN) && !defined(MODULE) static int __init xenbus_probe_initcall(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain() || xen_hvm_domain()) return 0; xenbus_probe(NULL); return 0; } device_initcall(xenbus_probe_initcall); #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST #ifdef CONFIG_PROC_FS static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #ifdef CONFIG_XEN_XENBUS_DEV int xenbus_conn(domid_t remote_dom, grant_ref_t *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); rc = close_evtchn(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = close_evtchn(xen_store_evtchn); if (rc2 != 0) pr_warning("XENBUS: Error freeing xenstore event channel:" " %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } #endif #endif /* CONFIG_XEN_PRIVILEGED_GUEST */ #ifndef MODULE static int __init #else int __devinit #endif xenbus_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN) || defined(MODULE) 
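/*
 * Store setup below: dom0 allocates a fresh page plus an unbound event
 * channel for xenstored; an HVM guest reads HVM_PARAM_STORE_EVTCHN and
 * HVM_PARAM_STORE_PFN and ioremaps the shared page; a PV guest takes
 * both values straight from xen_start_info.
 */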
/* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) pr_warning("XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); #endif /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate Xenstore page */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain()) { #endif #ifndef CONFIG_XEN uint64_t v = 0; err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto err; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto err; xen_store_mfn = (unsigned long)v; xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } else { #endif #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } #endif atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { pr_warning("XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); pr_warning("XENBUS: Error registering frontend device:" " %d\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #endif #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
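* Only the xenstore page allocated for dom0 (if any) is released here.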
*/ if (page != 0) free_page(page); return err; } #ifndef MODULE postcore_initcall(xenbus_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #endif #if defined(CONFIG_XEN) || defined(MODULE) static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ pr_info("XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); pr_warning("XENBUS: Timeout connecting to device: %s" " (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) pr_warning("XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) pr_warning("XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; #endif if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); #endif /* CONFIG_XEN || MODULE */ xenbus_probe.h000066400000000000000000000101431314037446600325040ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, unsigned int len); #else struct device dev; #endif struct bus_type bus; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus, struct module *owner, const char *mod_name); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); extern void xenbus_dev_shutdown(struct device *_dev); extern int xenbus_dev_suspend(struct device *dev); extern int xenbus_dev_resume(struct device *dev); extern int xenbus_dev_cancel(struct device *dev); extern void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown); extern int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); #endif xenbus_probe_backend.c000066400000000000000000000221561314037446600341550ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #endif #include #include #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #include #endif #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; struct xen_bus_type *bus; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) return -ENOMEM; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } /* backend/// */ static int xenbus_probe_backend_unit(struct xen_bus_type *bus, const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", 
dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 0); } #endif static struct device_attribute xenbus_backend_dev_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) .otherend_changed = frontend_changed, #else .dev = { .init_name = "xen-backend", }, #endif .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) .shutdown = xenbus_dev_shutdown, #endif .dev_attrs = xenbus_backend_dev_attrs, }, }; static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; static int read_frontend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); } #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); #endif int __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend, owner, mod_name); } EXPORT_SYMBOL_GPL(__xenbus_register_backend); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } #endif #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) static int backend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) #else void xenbus_backend_probe_and_watch(void) #endif { /* Enumerate devices in xenstore and watch for changes. 
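For the backend bus this walks backend/&lt;type&gt;/&lt;domid&gt;/&lt;device&gt; (three levels) and then registers be_watch on the "backend" root for later changes.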
*/ xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) return NOTIFY_DONE; #endif } #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) static int __init xenbus_probe_backend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = backend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_backend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_backend_init); #else void xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) pr_warning("XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); pr_warning("XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); #endif xenbus_probe_frontend.c000066400000000000000000000167731314037446600344150ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13#define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } static int xenbus_uevent_frontend(struct device *_dev, struct kobj_uevent_env *env) { struct xenbus_device *dev = to_xenbus_device(_dev); if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) return -ENOMEM; return 0; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 1); } static struct device_attribute xenbus_frontend_dev_attrs[] = { __ATTR_NULL }; static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, }; static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .otherend_changed = backend_changed, .bus = { .name = "xen", .match = xenbus_match, .uevent = 
xenbus_uevent_frontend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .dev_attrs = xenbus_frontend_dev_attrs, .pm = &xenbus_pm_ops, }, }; static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); } else if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !xen_domain()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); static int frontend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) { /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); return NOTIFY_DONE; } static int __init xenbus_probe_frontend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = frontend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_frontend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_frontend_init); #ifndef MODULE static int __init boot_wait_for_devices(void) { if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; } late_initcall(boot_wait_for_devices); #endif MODULE_LICENSE("GPL"); xenbus_xs.c000066400000000000000000000542221314037446600320300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.13/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warning("XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; //˱Ӧļxs_wire.hеxsd_sockmsg_typeж #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { if (printk_ratelimit()) pr_warning("XENBUS unexpected type [%d]," " expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. 
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
*/ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } static void xs_reset_watches(void) { #ifdef MODULE int err, supported = 0; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warning("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. 
*/ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warning("XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. 
This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. */ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warning("XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } 
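/*
 * Illustrative usage sketch (not part of the original sources): it shows how a
 * driver built on the xenbus_xs.c API above would typically update xenstore
 * nodes inside a transaction and register a watch.  The node paths
 * ("device/vif/0", "feature-foo"), the header location <xen/xenbus.h> and the
 * function/variable names prefixed "example_" are assumptions made for the
 * example only; retrying the transaction when xenbus_transaction_end() returns
 * -EAGAIN is the conventional xenstore pattern, not something mandated by the
 * code above.
 */
#include <linux/kernel.h>
#include <linux/err.h>
#include <xen/xenbus.h>                 /* assumed header path in this tree */

static void example_watch_cb(struct xenbus_watch *watch,
			     const char **vec, unsigned int len)
{
	/* vec[XS_WATCH_PATH] names the node that changed. */
	pr_info("xenstore node %s changed\n", vec[XS_WATCH_PATH]);
}

static struct xenbus_watch example_watch = {
	.node     = "device/vif/0/state",   /* hypothetical node */
	.callback = example_watch_cb,
};

static int example_update_node(void)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	/* Write a flag; nothing becomes visible to others until commit. */
	err = xenbus_printf(xbt, "device/vif/0", "feature-foo", "%d", 1);
	if (err) {
		xenbus_transaction_end(xbt, 1);   /* abort the transaction */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);     /* commit */
	if (err == -EAGAIN)
		goto again;                       /* another writer raced us */
	if (err)
		return err;

	/* Ask xenwatch_thread to call us back on later changes. */
	return register_xenbus_watch(&example_watch);
}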
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/000077500000000000000000000000001314037446600277315ustar00rootroot00000000000000features.c000066400000000000000000000016041314037446600316350ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. */ #include #include #include #ifdef CONFIG_PARAVIRT_XEN #include #else #include #endif #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include #include #include u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; EXPORT_SYMBOL(xen_features); void xen_setup_features(void) { struct xen_feature_info fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) xen_features[i * 32 + j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_v1_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry_v1 *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if 
(unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) return 0; } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); return 1; } int gnttab_end_foreign_access_ref(grant_ref_t ref) { if (_gnttab_end_foreign_access_ref(ref)) return 1; printk(KERN_DEBUG "WARNING: g.e. %#x still in use!\n", ref); return 0; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); struct deferred_entry { struct list_head list; grant_ref_t ref; uint16_t warn_delay; struct page *page; }; static LIST_HEAD(deferred_list); static void gnttab_handle_deferred(unsigned long); static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); static void gnttab_handle_deferred(unsigned long unused) { unsigned int nr = 10; struct deferred_entry *first = NULL; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); while (nr--) { struct deferred_entry *entry = list_first_entry(&deferred_list, struct deferred_entry, list); if (entry == first) break; list_del(&entry->list); spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref)) { put_free_entry(entry->ref); if (entry->page) { printk(KERN_DEBUG "freeing g.e. %#x (pfn %#lx)\n", entry->ref, page_to_pfn(entry->page)); __free_page(entry->page); } else printk(KERN_DEBUG "freeing g.e. %#x\n", entry->ref); kfree(entry); entry = NULL; } else { if (!--entry->warn_delay) pr_info("g.e. %#x still pending\n", entry->ref); if (!first) first = entry; } spin_lock_irqsave(&gnttab_list_lock, flags); if (entry) list_add_tail(&entry->list, &deferred_list); else if (list_empty(&deferred_list)) break; } if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); } static void gnttab_add_deferred(grant_ref_t ref, struct page *page) { struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); const char *what = KERN_WARNING "leaking"; if (entry) { unsigned long flags; entry->ref = ref; entry->page = page; entry->warn_delay = 60; spin_lock_irqsave(&gnttab_list_lock, flags); list_add_tail(&entry->list, &deferred_list); if (!timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); what = KERN_DEBUG "deferring"; } printk("%s g.e. %#x (pfn %lx)\n", what, ref, page ? page_to_pfn(page) : -1); } void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else gnttab_add_deferred(ref, page ? 
virt_to_page(page) : NULL); } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); void gnttab_multi_end_foreign_access(unsigned int nr, grant_ref_t refs[], struct page *pages[]) { for (; nr--; ++refs) { if (*refs != GRANT_INVALID_REF) { if (gnttab_end_foreign_access_ref(*refs)) put_free_entry(*refs); else if (pages) { gnttab_add_deferred(*refs, *pages); *pages = NULL; } else gnttab_add_deferred(*refs, NULL); *refs = GRANT_INVALID_REF; } if (pages) { if (*pages) { __free_page(*pages); *pages = NULL; } ++pages; } } } EXPORT_SYMBOL_GPL(gnttab_multi_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. */ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; 
spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(unsigned long *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames()); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; unsigned long *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status != GNTST_okay); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef 
CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) static DEFINE_SEQLOCK(gnttab_dma_lock); static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); ClearPageReserved(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; struct page *page, *new_page; void *addr, *new_addr; unsigned long pfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); copy_page(new_addr, addr); pfn = page_to_pfn(page); new_mfn = virt_to_mfn(new_addr); write_seqlock_bh(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? */ if (unlikely(page_mapped(page))) { write_sequnlock_bh(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); if (!xen_feature(XENFEAT_auto_translated_physmap)) { multicall_entry_t mc[2]; mmu_update_t mmu; set_phys_to_machine(pfn, new_mfn); set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); MULTI_grant_table_op(&mc[0], GNTTABOP_unmap_and_replace, &unmap, 1); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; MULTI_mmu_update(&mc[1], &mmu, 1, NULL, DOMID_SELF); err = HYPERVISOR_multicall_check(mc, 2, NULL); } else err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status != GNTST_okay); write_sequnlock_bh(&gnttab_dma_lock); new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); if (PageReserved(page)) SetPageReserved(new_page); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); reset_page_mapcount(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; BUG_ON(PageCompound(page)); do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. 
*/ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #endif /* CONFIG_XEN_BACKEND */ #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. #endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return 0; return gnttab_map(0, nr_grant_frames - 1); } #ifdef CONFIG_PM_SLEEP #include #ifdef CONFIG_X86 static int gnttab_suspend(void) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static void _gnttab_resume(void) { if (gnttab_resume()) BUG(); } static struct syscore_ops gnttab_syscore_ops = { .resume = _gnttab_resume, .suspend = gnttab_suspend, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { pr_warning("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } #ifdef CONFIG_XEN static int __init #else int __devinit #endif gnttab_init(void) { int i, ret; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) { ret = -ENOMEM; goto ini_nomem; } } if (gnttab_resume() < 0) { ret = -ENODEV; goto ini_nomem; } nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_UNUSED1) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) register_syscore_ops(&gnttab_syscore_ops); #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return ret; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif platform-pci.c000066400000000000000000000305731314037446600324230ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. 
* * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); /* NB. [aux-]ide-disks options do not unplug IDE CD-ROM drives. */ /* NB. aux-ide-disks is equiv to ide-disks except ignores primary master. */ static char *dev_unplug; module_param(dev_unplug, charp, 0644); MODULE_PARM_DESC(dev_unplug, "Emulated devices to unplug: " "[all,][ide-disks,][aux-ide-disks,][nics]\n"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } /*DTS2014042508949. 
driver write this flag when the xenpci resume succeed.*/ void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ /*#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } #endif*/ static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff /* NB: register a proper one */ #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 static int check_platform_magic(struct device *dev, long ioaddr, long iolen) { short magic, unplug = 0; char protocol, *p, *q, *err; for (p = dev_unplug; p; p = q) { q = strchr(dev_unplug, ','); if (q) *q++ = '\0'; if (!strcmp(p, "all")) unplug |= UNPLUG_ALL; else if (!strcmp(p, "ide-disks")) unplug |= UNPLUG_ALL_IDE_DISKS; else if (!strcmp(p, "aux-ide-disks")) unplug |= UNPLUG_AUX_IDE_DISKS; else if (!strcmp(p, "nics")) unplug |= UNPLUG_ALL_NICS; else dev_warn(dev, "unrecognised option '%s' " "in module parameter 'dev_unplug'\n", p); } if (iolen < 
0x16) { err = "backend too old"; goto no_dev; } magic = inw(XEN_IOPORT_MAGIC); if (magic != XEN_IOPORT_MAGIC_VAL) { err = "unrecognised magic value"; goto no_dev; } protocol = inb(XEN_IOPORT_PROTOVER); dev_info(dev, "I/O protocol version %d\n", protocol); switch (protocol) { case 1: outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM); outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER); if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) { dev_err(dev, "blacklisted by host\n"); return -ENODEV; } /* Fall through */ case 0: outw(unplug, XEN_IOPORT_UNPLUG); break; default: err = "unknown I/O protocol version"; goto no_dev; } return 0; no_dev: dev_warn(dev, "failed backend handshake: %s\n", err); if (!unplug) return 0; dev_err(dev, "failed to execute specified dev_unplug options!\n"); return -ENODEV; } void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } ret = pci_request_region(pdev, 1, DRV_NAME); if (ret < 0) return ret; ret = pci_request_region(pdev, 0, DRV_NAME); if (ret < 0) goto mem_out; platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; ret = check_platform_magic(&pdev->dev, ioaddr, iolen); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; write_feature_flag(); out: if (ret) { pci_release_region(pdev, 0); mem_out: pci_release_region(pdev, 1); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } module_init(platform_pci_module_init); 
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/reboot.c000066400000000000000000000176241314037446600314010ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). 
*/ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { pr_warning("Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct 
notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xen_proc.c000066400000000000000000000011511314037446600316310ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76#include #include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry * #ifndef MODULE __init #endif create_xen_proc_entry(const char *name, mode_t mode) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", NULL)) == NULL ) panic("Couldn't create /proc/xen"); return create_proc_entry(name, mode, xen_base); } #ifdef MODULE EXPORT_SYMBOL_GPL(create_xen_proc_entry); #elif defined(CONFIG_XEN_PRIVILEGED_GUEST) void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } #endif xenbus_backend_client.c000066400000000000000000000102331314037446600343260ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. * * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area, unsigned int nr, grant_handle_t handles[]) { unsigned int i; int err = 0; for (i = 0; i < nr; ++i) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, handles[i]); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) continue; xenbus_dev_error(dev, op.status, "unmapping page %u (handle %#x)", i, handles[i]); err = -EINVAL; } if (!err) { free_vm_area(area); kfree(handles); } return err; } /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, const grant_ref_t refs[], unsigned int nr) { grant_handle_t *handles = kmalloc(nr * sizeof(*handles), GFP_KERNEL); struct vm_struct *area; unsigned int i; if (!handles) return ERR_PTR(-ENOMEM); area = alloc_vm_area(nr * PAGE_SIZE); if (!area) { kfree(handles); return ERR_PTR(-ENOMEM); } for (i = 0; i < nr; ++i) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, refs[i], dev->otherend_id); gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); if (op.status == GNTST_okay) { handles[i] = op.handle; continue; } unmap_ring_vfree(dev, area, i, handles); xenbus_dev_fatal(dev, op.status, "mapping page %u (ref %#x, dom%d)", i, refs[i], dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(-EINVAL); } /* Stuff the handle array in an unused field. */ area->phys_addr = (unsigned long)handles; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { return unmap_ring_vfree(dev, area, get_vm_area_size(area) >> PAGE_SHIFT, (void *)(unsigned long)area->phys_addr); } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); xenbus_client.c000066400000000000000000000305731314037446600326700ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. 
* * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #include #include #include #include #endif #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. 
*/ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif static void xenbus_switch_fatal(struct xenbus_device *, int, int, const char *, ...); static int __xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state, int depth) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not take a caller's Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. 
*/ struct xenbus_transaction xbt; int current_state; int err, abort; if (state == dev->state) return 0; again: abort = 1; err = xenbus_transaction_start(&xbt); if (err) { xenbus_switch_fatal(dev, depth, err, "starting transaction"); return 0; } err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state); if (err != 1) goto abort; err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); if (err) { xenbus_switch_fatal(dev, depth, err, "writing new state"); goto abort; } abort = 0; abort: err = xenbus_transaction_end(xbt, abort); if (err) { if (err == -EAGAIN && !abort) goto again; xenbus_switch_fatal(dev, depth, err, "ending transaction"); } else dev->state = state; return 0; } /** * xenbus_switch_state * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { return __xenbus_switch_state(dev, state, 0); } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. */ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list *ap) { char *printf_buffer, *path_buffer; struct va_format vaf = { .fmt = fmt, .va = ap }; printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf); if (printf_buffer) dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (!printf_buffer || !path_buffer || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer)) dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /** * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps * avoiding recursion within xenbus_switch_state. */ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); if (!depth) __xenbus_switch_state(dev, XenbusStateClosing, 1); } /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_multi_grant_ring(struct xenbus_device *dev, unsigned int nr, struct page *pages[], grant_ref_t refs[]) { unsigned int i; int err = -EINVAL; for (i = 0; i < nr; ++i) { unsigned long pfn = page_to_pfn(pages[i]); err = gnttab_grant_foreign_access(dev->otherend_id, pfn_to_mfn(pfn), 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page #%u", i); break; } refs[i] = err; } return err; } EXPORT_SYMBOL_GPL(xenbus_multi_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { int result; if (xenbus_scanf(XBT_NIL, path, "state", "%d", &result) != 1) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); xenbus_comms.c000066400000000000000000000157311314037446600325270ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { #ifdef CONFIG_XEN_PRIVILEGED_GUEST static DECLARE_WORK(probe_work, xenbus_probe); int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: #endif wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. 
*/ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { return wait_event_interruptible(xb_waitq, xb_data_to_read()); } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) pr_err("XENBUS request ring is not quiescent " "(%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { pr_warning("XENBUS response ring is not quiescent" " (%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); /* breaks kdump */ if (!reset_devices) intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("XENBUS request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } xenbus_comms.h000066400000000000000000000046661314037446600325410ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/* * Private include for xenbus communications. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H int xs_init(void); int xb_init_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #if defined(CONFIG_XEN_XENBUS_DEV) && defined(CONFIG_XEN_PRIVILEGED_GUEST) #include #include int xenbus_conn(domid_t, grant_ref_t *, evtchn_port_t *); #endif #endif /* _XENBUS_COMMS_H */ xenbus_dev.c000066400000000000000000000267161314037446600321740ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. 
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *xenbus_dev_intf; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if (len > sizeof(u->u.buffer) - u->len) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = 
u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.node = kstrdup(path, GFP_KERNEL); watch->watch.callback = watch_fired; watch->token = kstrdup(token, GFP_KERNEL); watch->dev_data = u; err = watch->watch.node && watch->token ? register_xenbus_watch(&watch->watch) : -ENOMEM; if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long 
data) { void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; int #ifndef MODULE __init #else __devinit #endif xenbus_dev_init(void) { xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400); if (xenbus_dev_intf) xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops; return 0; } xenbus_probe.c000066400000000000000000001130171314037446600325140ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) 
\ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL(sym) __typeof__(sym) sym #else #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL EXPORT_SYMBOL_GPL #endif #ifndef CONFIG_XEN #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; PARAVIRT_EXPORT_SYMBOL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; PARAVIRT_EXPORT_SYMBOL(xen_store_interface); static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static #ifdef CONFIG_XEN_UNPRIVILEGED_GUEST __initdata #endif BLOCKING_NOTIFIER_HEAD(xenstore_chain); #if defined(CONFIG_XEN) || defined(MODULE) static void wait_for_devices(struct xenbus_driver *xendrv); #endif /* If something in array of ids matches this device, return it. */ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } PARAVIRT_EXPORT_SYMBOL(xenbus_match); static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_read_otherend_details); #if defined(CONFIG_XEN) || defined(MODULE) static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) #else /* !CONFIG_XEN && !MODULE */ void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown) #endif /* CONFIG_XEN || MODULE */ { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. 
*/ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. */ # if defined(CONFIG_XEN) || defined(MODULE) const struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); int ignore_on_shutdown = (bus->levels == 2); # endif if (ignore_on_shutdown && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } PARAVIRT_EXPORT_SYMBOL(xenbus_otherend_changed); static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, bus->otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); #if defined(CONFIG_XEN) || defined(MODULE) return -ENODEV; #else return err; #endif } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_probe); int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); if (drv->remove) drv->remove(dev); free_otherend_details(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_remove); void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); 
goto out; } xenbus_switch_state(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; #endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } PARAVIRT_EXPORT_SYMBOL(xenbus_register_driver_common); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. 
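* For example (hypothetical paths): with info->nodename "device/vif/1", "device/vif/1/0" is treated as a subdirectory and cleaned up, while "device/vif/10" is not.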
*/ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t xendev_show_nodename(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); static ssize_t xendev_show_devtype(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); static ssize_t xendev_show_modalias(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); } static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. */ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. 
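* (The failure labels below unwind in reverse order: the sysfs attribute files created so far are removed before the device is unregistered.)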
*/ err = device_register(&xendev->dev); if (err) goto fail; err = device_create_file(&xendev->dev, &dev_attr_nodename); if (err) goto fail_unregister; err = device_create_file(&xendev->dev, &dev_attr_devtype); if (err) goto fail_remove_nodename; err = device_create_file(&xendev->dev, &dev_attr_modalias); if (err) goto fail_remove_devtype; return 0; fail_remove_devtype: device_remove_file(&xendev->dev, &dev_attr_devtype); fail_remove_nodename: device_remove_file(&xendev->dev, &dev_attr_nodename); fail_unregister: device_unregister(&xendev->dev); fail: kfree(xendev); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_node); #if defined(CONFIG_XEN) || defined(MODULE) /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { pr_warning("XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { pr_warning("XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype) || add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename) || add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype)) return -ENOMEM; return 0; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) static struct device_attribute xenbus_dev_attrs[] = { __ATTR_NULL }; #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif }, .dev = { .init_name = "xen", }, }; int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. 
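* (wait_for_devices() gives the newly registered driver's devices up to five minutes to connect; see the comment above its definition later in this file.)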
*/ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); #endif static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(bus, type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_devices); static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_changed); #if defined(CONFIG_XEN) || defined(MODULE) static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
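* The fe_watch below covers the whole "device" subtree; frontend_changed() hands each fired path to xenbus_dev_changed(), which creates new devices or cleans up vanished ones.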
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int __maybe_unused suspend_dev(struct device *dev, void *data) #else int xenbus_dev_suspend(struct device *dev) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); if (drv->suspend) err = drv->suspend(xdev); if (err) pr_warning("xenbus: suspend %s failed: %i\n", dev_name(dev), err); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend); #if defined(CONFIG_XEN) || defined(MODULE) static int __maybe_unused suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) pr_warning("xenbus: suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } static int __maybe_unused resume_dev(struct device *dev, void *data) #else int xenbus_dev_resume(struct device *dev) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { pr_warning("xenbus: resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { pr_warning("xenbus: resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { pr_warning("xenbus_probe: resume (watch_otherend) %s failed:" " %d\n", dev_name(dev), err); return err; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_resume); #if !defined(CONFIG_XEN) && !defined(MODULE) int xenbus_dev_cancel(struct device *dev) { /* Do nothing */ DPRINTK("cancel"); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_cancel); #elif defined(CONFIG_PM_SLEEP) || defined(MODULE) void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } #endif /* CONFIG_PM_SLEEP || MODULE */ /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int #ifdef CONFIG_XEN __init #endif register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } #ifndef CONFIG_XEN EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #endif #ifndef CONFIG_XEN static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state) != 1) backend_state = XenbusStateUnknown; printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) pr_info("XENBUS: backend %s timed out.\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. */ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_DEBUG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; pr_info("XENBUS: triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); pr_info("XENBUS: reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_DEBUG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = xenbus_directory(XBT_NIL, "device", 
devclass[i], &dev_n); if (IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } #endif void #if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) __init #elif defined(MODULE) __devinit #endif xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); #ifndef CONFIG_XEN /* reset devices in Connected or Closed state */ xenbus_reset_state(); #endif #if defined(CONFIG_XEN) || defined(MODULE) /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); #endif /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } PARAVIRT_EXPORT_SYMBOL(xenbus_probe); #if !defined(CONFIG_XEN) && !defined(MODULE) static int __init xenbus_probe_initcall(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain() || xen_hvm_domain()) return 0; xenbus_probe(NULL); return 0; } device_initcall(xenbus_probe_initcall); #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST #ifdef CONFIG_PROC_FS static struct file_operations xsd_kva_fops; static struct proc_dir_entry *xsd_kva_intf; static struct proc_dir_entry *xsd_port_intf; static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "0x%p", xen_store_interface); *eof = 1; return len; } static int xsd_port_read(char *page, char **start, off_t off, int count, int *eof, void *data) { int len; len = sprintf(page, "%d", xen_store_evtchn); *eof = 1; return len; } #endif #ifdef CONFIG_XEN_XENBUS_DEV int xenbus_conn(domid_t remote_dom, grant_ref_t *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); rc = close_evtchn(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = close_evtchn(xen_store_evtchn); if (rc2 != 0) pr_warning("XENBUS: Error freeing xenstore event channel:" " %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } #endif #endif /* CONFIG_XEN_PRIVILEGED_GUEST */ #ifndef MODULE static int __init #else int __devinit #endif xenbus_init(void) { int err = 0; unsigned long page = 0; DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN) || defined(MODULE) 
/* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) pr_warning("XENBUS: Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); #endif /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { struct evtchn_alloc_unbound alloc_unbound; /* Allocate Xenstore page */ page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600); if (xsd_kva_intf) { memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops, sizeof(xsd_kva_fops)); xsd_kva_fops.mmap = xsd_kva_mmap; xsd_kva_intf->proc_fops = &xsd_kva_fops; xsd_kva_intf->read_proc = xsd_kva_read; } xsd_port_intf = create_xen_proc_entry("xsd_port", 0400); if (xsd_port_intf) xsd_port_intf->read_proc = xsd_port_read; #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain()) { #endif #ifndef CONFIG_XEN uint64_t v = 0; err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto err; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto err; xen_store_mfn = (unsigned long)v; xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } else { #endif #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #endif #if !defined(CONFIG_XEN) && !defined(MODULE) } #endif atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto err; } #if defined(CONFIG_XEN) || defined(MODULE) xenbus_dev_init(); #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { pr_warning("XENBUS: Error initializing xenstore comms: %i\n", err); goto err; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); pr_warning("XENBUS: Error registering frontend device:" " %d\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #endif #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; err: /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. 
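* (Only the xenstore page allocated above for the initial domain, if any, is freed on this error path.)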
*/ if (page != 0) free_page(page); return err; } #ifndef MODULE postcore_initcall(xenbus_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #endif #if defined(CONFIG_XEN) || defined(MODULE) static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ pr_info("XENBUS: Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); pr_warning("XENBUS: Timeout connecting to device: %s" " (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) pr_warning("XENBUS: Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) pr_warning("XENBUS: Waiting for " "devices to initialise: "); seconds_waited += 5; printk("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; #endif if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); #endif /* CONFIG_XEN || MODULE */ xenbus_probe.h000066400000000000000000000100531314037446600325150ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, unsigned int len); #else struct device dev; #endif struct bus_type bus; }; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); extern void xenbus_dev_shutdown(struct device *_dev); extern int xenbus_dev_suspend(struct device *dev); extern int xenbus_dev_resume(struct device *dev); extern int xenbus_dev_cancel(struct device *dev); extern void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown); extern int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); #endif xenbus_probe_backend.c000066400000000000000000000211751314037446600341660ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #ifdef CONFIG_XEN #include #include #endif #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; struct xen_bus_type *bus; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) return -ENOMEM; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } /* backend/// */ static int xenbus_probe_backend_unit(struct xen_bus_type *bus, const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = 
xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } #ifndef CONFIG_XEN static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 0); } #endif static struct device_attribute xenbus_backend_dev_attrs[] = { __ATTR_NULL }; static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, #ifndef CONFIG_XEN .otherend_changed = frontend_changed, #else .dev = { .init_name = "xen-backend", }, #endif .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, #ifdef CONFIG_XEN .shutdown = xenbus_dev_shutdown, #endif .dev_attrs = xenbus_backend_dev_attrs, }, }; static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; static int read_frontend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); } #ifndef CONFIG_XEN int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); #endif int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } #endif #ifndef CONFIG_XEN static int backend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) #else void xenbus_backend_probe_and_watch(void) #endif { /* Enumerate devices in xenstore and watch for changes. 
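* Backend nodes live under backend/<type>/<frontend-domid>/<id>; the be_watch registered below reports changes to backend_changed().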
*/ xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); #ifndef CONFIG_XEN return NOTIFY_DONE; #endif } #ifndef CONFIG_XEN static int __init xenbus_probe_backend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = backend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_backend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_backend_init); #else void __init xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) pr_warning("XENBUS: Error registering backend bus: %i\n", xenbus_backend.error); } void __init xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); pr_warning("XENBUS: Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); #endif xenbus_probe_frontend.c000066400000000000000000000213071314037446600344130ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76#define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } static int xenbus_uevent_frontend(struct device *_dev, struct kobj_uevent_env *env) { struct xenbus_device *dev = to_xenbus_device(_dev); if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) return -ENOMEM; return 0; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 1); } static struct device_attribute xenbus_frontend_dev_attrs[] = { __ATTR_NULL }; static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, }; static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .otherend_changed = backend_changed, .bus = { .name = "xen", .match = xenbus_match, .uevent = xenbus_uevent_frontend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = 
xenbus_dev_shutdown, .dev_attrs = xenbus_frontend_dev_attrs, .pm = &xenbus_pm_ops, }, }; static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (ignore_nonessential) { /* With older QEMU, for PVonHVM guests the guest config files * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] * which is nonsensical as there is no PV FB (there can be * a PVKB) running as HVM guest. */ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) return 0; if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) return 0; } xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); } static int non_essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, false); } static int exists_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, essential_device_connecting); } static int exists_non_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, non_essential_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); } else if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; static bool wait_loop(unsigned long start, unsigned int max_delay, unsigned int *seconds_waited) { if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { if (!*seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); *seconds_waited += 5; printk("%us...", max_delay - *seconds_waited); if (*seconds_waited == max_delay) return true; } schedule_timeout_interruptible(HZ/10); return false; } /* * On a 5-minute timeout, wait for all devices currently configured. 
We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !xen_domain()) return; while (exists_non_essential_connecting_device(drv)) if (wait_loop(start, 30, &seconds_waited)) break; /* Skips PVKB and PVFB check.*/ while (exists_essential_connecting_device(drv)) if (wait_loop(start, 270, &seconds_waited)) break; if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); static int frontend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) { /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); return NOTIFY_DONE; } static int __init xenbus_probe_frontend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = frontend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_frontend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_frontend_init); #ifndef MODULE static int __init boot_wait_for_devices(void) { if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; } late_initcall(boot_wait_for_devices); #endif MODULE_LICENSE("GPL"); xenbus_xs.c000066400000000000000000000545341314037446600320470ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/3.0.76/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. 
* * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. 
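* unregister_xenbus_watch() relies on this serialisation: it takes xenwatch_mutex (unless called from the xenwatch thread itself) so that no callback for the removed watch is still running.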
*/ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warning("XENBUS xen store gave: unknown error %s", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } #endif void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; #define XS_LINUX_WEAK_WRITE 128 msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { pr_warn_ratelimited("XENBUS unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. 
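* Illustratively, xs_single(t, XS_READ, path, &len) is just xs_talkv() with a single kvec covering the string and its trailing NUL.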
*/ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). 
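* Illustrative use with a hypothetical key: xenbus_rm(XBT_NIL, dev->nodename, "ring-ref") removes one entry under the device's own node.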
*/ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) 
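/* A minimal usage sketch (the node names and variables here are hypothetical):
 *
 *	unsigned int ring_ref;
 *	char *protocol;
 *	err = xenbus_gather(XBT_NIL, dev->otherend,
 *			    "ring-ref", "%u", &ring_ref,
 *			    "protocol", NULL, &protocol,
 *			    NULL);
 *
 * Each tuple is (name, scanf format, destination); a NULL format stores the
 * kmalloc'ed string itself, which the caller must kfree(). The argument list
 * is terminated by a NULL name.
 */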
{ va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } static void xs_reset_watches(void) { #ifdef MODULE int err, supported = 0; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warning("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warning("XENBUS Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. 
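* Any events already queued for this watch are unlinked from watch_events and freed, so the callback cannot run again once the watch is unregistered.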
*/ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. */ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
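* Drop the response mutex and loop back to wait until the re-established connection has data pending again.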
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warning("XENBUS error %d while reading " "message\n", err); if (kthread_should_stop()) break; } return 0; } int #ifndef MODULE __init #else __devinit #endif xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600303350ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? 
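# On ia64 the hypercalls are issued through the xencomm layer, hence the extra objects below for that architecture only.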
ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/Makefile000066400000000000000000000000661314037446600306400ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/evtchn.c000066400000000000000000000212711314037446600306340ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED, #endif "xen-platform-pci", pdev); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/machine_reboot.c000066400000000000000000000047011314037446600323220ustar00rootroot00000000000000#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
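* Each AP bumps nr_spinning with interrupts disabled and busy-waits until the boot CPU clears do_spin, which happens only after the suspend hypercall has returned and any resume fix-ups (grant table, event channels) are done.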
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } unsigned int migrate_state = 0; EXPORT_SYMBOL(migrate_state); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; migrate_state = 1; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); local_bh_disable(); /* DTS2014031907471 */ info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); local_bh_enable(); /* DTS2014031907471 */ enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled) xenbus_resume(); else xenbus_suspend_cancel(); /* update uvp flags */ write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); migrate_state = 0; #if defined(VRM) xenbus_write(XBT_NIL, "control/uvp", "vrm_flag", "true"); #endif return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/panic-handler.c000066400000000000000000000016521314037446600320530ustar00rootroot00000000000000#include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
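* because SHUTDOWN_crash tells the hypervisor to take the whole domain down as crashed.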
*/ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } platform-compat.c000066400000000000000000000067611314037446600324020ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) 
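/* Two-pass implementation: a first vsnprintf() into a zero-sized dummy buffer
 * measures the formatted length, then the string is allocated and printed for real. */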
{ va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif platform-pci-unplug.c000066400000000000000000000117421314037446600331750ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. 
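* (In this copy the magic-number check itself has been dropped: the driver is unconditionally enabled and NIC/IDE unplug is always requested a few lines below.)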
* Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/platform-pci.h000066400000000000000000000026601314037446600317500ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. 
*/ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include /* the features pvdriver supported */ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); /*when xenpci resume succeed, write this flag, then uvpmonitor call ndsend*/ extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/reboot_64.c000066400000000000000000000201421314037446600311440ustar00rootroot00000000000000#define __KERNEL_SYSCALLS__ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif MODULE_LICENSE("Dual BSD/GPL"); #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } /*侫õⲿŹػ*/ static int xen_reboot_vm(void) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *reboot_argv[] = { "/sbin/reboot", NULL }; if (call_usermodehelper("/sbin/reboot", reboot_argv, envp, 0) < 0) printk(KERN_ERR "Xen reboot vm Failed.\n"); return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
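* Recorded by xen_resume_notifier() so that xen_suspend() knows whether it has to re-register the suspend event channel after coming back.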
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; daemonize("suspend"); err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { printk(KERN_ERR "Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #else # define xen_suspend NULL #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { int err; err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ? xen_suspend : shutdown_process, NULL, CLONE_FS | CLONE_FILES); if (err < 0) { printk(KERN_WARNING "Error creating shutdown process (%d): " "retrying...\n", -err); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
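* XENBUS_IS_ERR_READ() covers both cases; the transaction is then abandoned and the handler simply waits for the next watch event.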
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } printk(KERN_WARNING "%s(%d): receive shutdown request %s\n", __FUNCTION__, __LINE__, str); if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) //ctrl_alt_del(); xen_reboot_vm(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else printk("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { printk(KERN_ERR "Unable to read sysrq code in " "control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key, NULL); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); printk(KERN_INFO "suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { printk(KERN_ERR "Failed to set shutdown watcher\n"); return err; } err = register_xenbus_watch(&sysrq_watch); if (err) { printk(KERN_ERR "Failed to set sysrq watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { printk(KERN_ERR "Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-platform-pci/xen_support.c000066400000000000000000000040531314037446600317320ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. 
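* (ia64-only hypercall and balloon stubs, plus a xen_machphys_update() stub for the PV-on-HVM case.)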
* Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/000077500000000000000000000000001314037446600255425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.27/000077500000000000000000000000001314037446600262765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.27/common.h000066400000000000000000000075301314037446600277440ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; }; #define DPRINTK(_f, _a...) \ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.27/xenbus.c000066400000000000000000000240001314037446600277420ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif extern struct scsi_host_template scsifront_sht; static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, 
"fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev->dev.driver_data = info; info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev->dev.driver_data; DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
*/ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev->dev.driver_data; DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } 
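/* End of the 2.6.27 variant: scsifront_xenbus_init() and scsifront_xenbus_unregister() just register and unregister the "vscsi" xenbus frontend driver declared above. */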
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.32/000077500000000000000000000000001314037446600262725ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.32/common.h000066400000000000000000000100441314037446600277320ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/2.6.32/xenbus.c000066400000000000000000000240211314037446600277410ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif extern struct scsi_host_template scsifront_sht; static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } 
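/*
 * At this point the Scsi_Host has been allocated, the shadow entries are
 * chained into a free list of request IDs (shadow[i].next_free, terminated
 * by the 0x0fff sentinel) and the shared ring plus event channel have been
 * set up by scsifront_init_ring().  The rest of probe initialises the wait
 * queue and spinlocks, starts the "vscsiif.%d" response thread, registers
 * the SCSI host and finally switches the device to XenbusStateInitialised.
 */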
init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. */ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case 
XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.101/000077500000000000000000000000001314037446600263425ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.101/common.h000066400000000000000000000115501314037446600300050ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 #define DMA_VSCSI_INDIRECT 4 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 128 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_MAX_RING_ORDER 3U #define VSCSIIF_MAX_RING_PAGES (1U << VSCSIIF_MAX_RING_ORDER) #define VSCSIIF_MAX_RING_PAGE_SIZE (VSCSIIF_MAX_RING_PAGES * PAGE_SIZE) #define VSCSIIF_MAX_RING_SIZE __CONST_RING_SIZE(vscsiif, VSCSIIF_MAX_RING_PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_MAX_RING_SIZE #define VSCSIIF_STATE_DISCONNECT 0 #define VSCSIIF_STATE_CONNECTED 1 #define VSCSIIF_STATE_SUSPENDED 2 struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct vscsifrnt_shadow { uint16_t next_free; #define VSCSIIF_IN_USE 0x0fff #define VSCSIIF_NONE 0x0ffe /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* Number of pieces of scatter-gather */ unsigned int nr_segments; uint8_t sc_data_direction; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* requested struct scsi_cmnd is stored from kernel */ struct scsi_cmnd *sc; //int gref[SG_ALL]; struct grant **grants_used; struct grant **indirect_grants; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref[VSCSIIF_MAX_RING_PAGES]; struct vscsiif_front_ring ring; unsigned int ring_size; struct vscsiif_response ring_res; struct list_head grants; struct list_head indirect_pages; unsigned int max_indirect_segments; unsigned int persistent_gnts_c; unsigned int feature_persistent; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; wait_queue_head_t wq_sync; wait_queue_head_t wq_pause; unsigned char waiting_resp; unsigned char waiting_sync; unsigned char waiting_pause; unsigned char pause; unsigned callers; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); void scsifront_finish_all(struct vscsifrnt_info *info); irqreturn_t scsifront_intr(int irq, void *dev_id); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.101/scsifront.c000066400000000000000000000575041314037446600305330ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "common.h" #include #define PREFIX(lvl) KERN_##lvl "scsifront: " static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free >= VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = VSCSIIF_IN_USE; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void _add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { info->shadow[id].next_free = info->shadow_free; info->shadow[id].sc = NULL; info->shadow_free = id; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); _add_id_to_freelist(info, id); spin_unlock_irqrestore(&info->shadow_lock, flags); } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct vscsifrnt_info *info, int write) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->dev->otherend_id, buffer_mfn, write); return gnt_list_entry; } struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } #if 0 static bool push_cmd_to_ring(struct vscsifrnt_info *info, vscsiif_request_t *ring_req) { unsigned int left, rqid = info->active.rqid; struct scsi_cmnd *sc; for (; ; ring_req = NULL) { struct vscsiif_sg_list *sgl; if (!ring_req) { struct vscsiif_front_ring *ring = &info->ring; ring_req = RING_GET_REQUEST(ring, ring->req_prod_pvt); ring->req_prod_pvt++; ring_req->rqid = rqid; } left = info->shadow[rqid].nr_segments - info->active.done; if (left <= VSCSIIF_SG_TABLESIZE) break; sgl = (void *)ring_req; sgl->act = VSCSIIF_ACT_SCSI_SG_PRESET; if (left > VSCSIIF_SG_LIST_SIZE) left = VSCSIIF_SG_LIST_SIZE; memcpy(sgl->seg, info->active.segs + info->active.done, left * sizeof(*sgl->seg)); sgl->nr_segments = left; info->active.done += left; if (RING_FULL(&info->ring)) return false; } sc = info->active.sc; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; if ( 
sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); if (info->shadow[rqid].sc_data_direction == DMA_VSCSI_INDIRECT) { ring_req->sc_data_direction = DMA_VSCSI_INDIRECT; ring_req->u.indirect.indirect_op = (uint8_t)sc->sc_data_direction; ring_req->u.indirect.nr_segments = left; } else { ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->u.rw.nr_segments = left; } //ring_req->sc_data_direction = sc->sc_data_direction; ring_req->timeout_per_command = sc->request->timeout / HZ; //ring_req->nr_segments = left; memcpy(ring_req->seg, info->active.segs + info->active.done, left * sizeof(*ring_req->seg)); info->active.sc = NULL; return !RING_FULL(&info->ring); } #endif static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) { struct vscsifrnt_shadow *s = &info->shadow[id]; struct scsi_cmnd *sc = s->sc; int i = 0; int nseg = s->nr_segments; int read = (s->sc->sc_data_direction == DMA_FROM_DEVICE); unsigned int off, len, bytes; unsigned int data_len = scsi_bufflen(sc); char *sg_data = NULL; void *shared_data = NULL; if (s->sc->sc_data_direction == DMA_NONE) return; if (read && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); sg_data = kmap_atomic(sg_page(sg), KM_USER1); memcpy(sg_data + sg->offset, shared_data + sg->offset, sg->length); kunmap_atomic(sg_data, KM_USER1); kunmap_atomic(shared_data, KM_USER0); len -= bytes; data_len -= bytes; off = 0; } } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) { printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); shost_printk(PREFIX(ALERT), info->host, "grant still in use by backend\n"); BUG(); } list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. 
*/ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->sc_data_direction == DMA_VSCSI_INDIRECT) { for (i = 0; i < VSCSI_INDIRECT_PAGES(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = info->shadow[id].sc; if (sc == NULL) BUG(); scsifront_gnttab_done(info, id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; switch (info->shadow[id].rslt_reset) { case 0: info->shadow[id].rslt_reset = ring_res->rslt; break; case -1: _add_id_to_freelist(info, id); break; default: shost_printk(PREFIX(ERR), info->host, "bad reset state %d, possibly leaking %u\n", info->shadow[id].rslt_reset, id); break; } spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } static void scsifront_do_response(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { /*if (info->host->sg_tablesize > VSCSIIF_SG_TABLESIZE) { u8 act = ring_res->act; if (act == VSCSIIF_ACT_SCSI_SG_PRESET) return; if (act != info->shadow[ring_res->rqid].act) DPRINTK("Bogus backend response (%02x vs %02x)\n", act, info->shadow[ring_res->rqid].act); }*/ if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } static int scsifront_ring_drain(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); scsifront_do_response(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); } else { info->ring.sring->rsp_event = i + 1; } return more_to_do; } static int scsifront_cmd_done(struct vscsifrnt_info *info) { int more_to_do; unsigned long flags; spin_lock_irqsave(info->host->host_lock, flags); more_to_do = scsifront_ring_drain(info); /*if (info->active.sc && !RING_FULL(&info->ring)) { push_cmd_to_ring(info, NULL); scsifront_do_request(info); }*/ info->waiting_sync = 0; spin_unlock_irqrestore(info->host->host_lock, 
flags); wake_up(&info->wq_sync); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } void scsifront_finish_all(struct vscsifrnt_info *info) { unsigned i; struct vscsiif_response resp; scsifront_ring_drain(info); for (i = 0; i < VSCSIIF_MAX_REQS; i++) { if (info->shadow[i].next_free != VSCSIIF_IN_USE) continue; resp.rqid = i; resp.sense_len = 0; resp.rslt = DID_RESET << 16; resp.residual_len = 0; scsifront_do_response(info, &resp); } } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } static int scsifront_calculate_segs(struct scsi_cmnd *sc) { unsigned int i, off, len, bytes; int ref_cnt = 0; unsigned int data_len = scsi_bufflen(sc); struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } return ref_cnt; } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id) { grant_ref_t gref_head; struct page *page; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned int data_len = scsi_bufflen(sc); int nseg, max_grefs, n; struct grant *gnt_list_entry = NULL; struct scsiif_request_segment_aligned *segments = NULL; bool new_persistent_gnts; if (sc->sc_data_direction == DMA_NONE || !data_len) { ring_req->u.rw.nr_segments = (uint8_t)ref_cnt; return 0; } max_grefs = info->max_indirect_segments ? info->max_indirect_segments + VSCSI_INDIRECT_PAGES(info->max_indirect_segments) : VSCSIIF_SG_TABLESIZE; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; err = gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head); if (err) { shost_printk(PREFIX(ERR), info->host, "gnttab_alloc_grant_references() error\n"); return -ENOMEM; } } else new_persistent_gnts = 0; nseg = scsifront_calculate_segs(sc); if (nseg > VSCSIIF_SG_TABLESIZE) { ring_req->sc_data_direction = DMA_VSCSI_INDIRECT; ring_req->u.indirect.indirect_op = (uint8_t)sc->sc_data_direction; ring_req->u.indirect.nr_segments = (uint16_t)nseg; info->shadow[id].sc_data_direction = DMA_VSCSI_INDIRECT; } else { ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->u.rw.nr_segments = (uint8_t)nseg; info->shadow[id].sc_data_direction = (uint8_t)sc->sc_data_direction; } /* quoted scsi_lib.c/scsi_req_map_sg . 
*/ nr_pages = PFN_UP(data_len + scsi_sglist(sc)->offset); if (nr_pages > max_grefs) { shost_printk(PREFIX(ERR), info->host, "Unable to map request_buffer for command!\n"); ref_cnt = -E2BIG; } else { struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { page = sg_page(sg); off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* map indirect page */ if ((ring_req->sc_data_direction == DMA_VSCSI_INDIRECT) && (ref_cnt % SEGS_PER_VSCSI_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_IRQ0); n = ref_cnt / SEGS_PER_VSCSI_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info, 0); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_IRQ0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info, write); ref = gnt_list_entry->gref; info->shadow[id].grants_used[ref_cnt] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (write && info->feature_persistent) { char *sg_data = NULL; void *shared_data = NULL; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); sg_data = kmap_atomic(sg_page(sg), KM_USER1); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. 
It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, sg_data + sg->offset, sg->length); kunmap_atomic(sg_data, KM_USER1); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->sc_data_direction != DMA_VSCSI_INDIRECT) { ring_req->u.rw.seg[ref_cnt].gref = ref; ring_req->u.rw.seg[ref_cnt].offset = (uint16_t)off; ring_req->u.rw.seg[ref_cnt].length = (uint16_t)bytes; } else { n = ref_cnt % SEGS_PER_VSCSI_INDIRECT_FRAME; segments[n] = (struct scsiif_request_segment_aligned) { .gref = ref, .offset = (uint16_t)off, .length = (uint16_t)bytes }; } page++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } if (segments) kunmap_atomic(segments, KM_IRQ0); /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return ref_cnt; } static int scsifront_enter(struct vscsifrnt_info *info) { if (info->pause) return 1; info->callers++; return 0; } static void scsifront_return(struct vscsifrnt_info *info) { info->callers--; if (info->callers) return; if (!info->waiting_pause) return; info->waiting_pause = 0; wake_up(&info->wq_pause); } static int scsifront_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct vscsifrnt_info *info = shost_priv(shost); vscsiif_request_t *ring_req; unsigned long flags; int ref_cnt; uint16_t rqid; if (scsifront_enter(info)) return SCSI_MLQUEUE_HOST_BUSY; /* debug printk to identify more missing scsi commands shost_printk(KERN_INFO "scsicmd: ", sc->device->host, "len=%u %#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x\n", sc->cmd_len, sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], sc->cmnd[4], sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9]); */ spin_lock_irqsave(shost->host_lock, flags); scsi_cmd_get_serial(shost, sc); if (RING_FULL(&info->ring)) { scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); info->shadow[rqid].sc = sc; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_CDB; info->shadow[rqid].sc_data_direction = (uint8_t)sc->sc_data_direction; ref_cnt = map_data_for_request(info, sc, ring_req, rqid); if (ref_cnt < 0) { add_id_to_freelist(info, rqid); info->ring.req_prod_pvt--; scsifront_do_request(info); scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); if (ref_cnt == (-ENOMEM)) return SCSI_MLQUEUE_HOST_BUSY; sc->result = (DID_ERROR << 16); sc->scsi_done(sc); return 0; } info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = shost_priv(host); vscsiif_request_t *ring_req; uint16_t rqid; int err = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif while 
(RING_FULL(&info->ring)) { if (err || info->pause) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return FAILED; } info->waiting_sync = 1; spin_unlock_irq(host->host_lock); err = wait_event_interruptible(info->wq_sync, !info->waiting_sync); spin_lock_irq(host->host_lock); } if (scsifront_enter(info)) { spin_unlock_irq(host->host_lock); return FAILED; } ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; info->shadow[rqid].rslt_reset = 0; ring_req->channel = sc->device->channel; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->cmd_len = sc->cmd_len; if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); ring_req->u.rw.nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); err = wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); if (!err) { err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); } else { spin_lock(&info->shadow_lock); info->shadow[rqid].rslt_reset = -1; spin_unlock(&info->shadow_lock); err = FAILED; } scsifront_return(info); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.101/xenbus.c000066400000000000000000000546321314037446600300240ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /* * Patched to support >2TB drives * 2010, Samuel Kvasnica, IMS Nanofabrication AG */ #include "common.h" #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif static unsigned int max_nr_segs = VSCSIIF_SG_TABLESIZE; module_param_named(max_segs, max_nr_segs, uint, 0); MODULE_PARM_DESC(max_segs, "Maximum number of segments per request"); extern struct scsi_host_template scsifront_sht; static int fill_grant_buffer(struct vscsifrnt_info *info, int num) { struct grant *gnt_list_entry, *n; struct page *granted_page; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static void scsifront_free_ring(struct vscsifrnt_info *info) { int i; for (i = 0; i < info->ring_size; i++) { if (info->ring_ref[i] != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref[i], 0UL); info->ring_ref[i] = GRANT_INVALID_REF; } } free_pages((unsigned long)info->ring.sring, get_order(VSCSIIF_MAX_RING_PAGE_SIZE)); info->ring.sring = NULL; if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } void scsifront_resume_free(struct vscsifrnt_info *info) { //struct Scsi_Host *host = info->host; //unsigned long flags; struct grant *persistent_gnt; struct grant *n; int i, j, segs; //spin_lock_irqsave(host->host_lock, flags); //info->connected = VSCSIIF_STATE_SUSPENDED; //spin_unlock_irqrestore(host->host_lock, flags); /* Prevent new requests being issued until we fix things up. 
*/ //scsifront_device_block_io(host); //scsi_block_requests(host); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].sc) goto free_shadow; segs = info->shadow[i].nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].sc_data_direction != DMA_VSCSI_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < VSCSI_INDIRECT_PAGES(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } scsifront_free_ring(info); } static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } scsifront_resume_free(info); scsi_host_put(info->host); } static void shadow_init(struct vscsifrnt_shadow * shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); for (i = 0; i < ring_size; i++) { shadow[i].next_free = i + 1; init_waitqueue_head(&(shadow[i].wq_reset)); shadow[i].wait_reset = 0; } shadow[ring_size - 1].next_free = VSCSIIF_NONE; } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; int i; for (i = 0; i < info->ring_size; i++) info->ring_ref[i] = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_pages(GFP_KERNEL, get_order(VSCSIIF_MAX_RING_PAGE_SIZE)); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); for (i = 0; i < info->ring_size; i++) { err = xenbus_grant_ring(dev, virt_to_mfn((unsigned long)sring + i * PAGE_SIZE)); if (err < 0) { info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref[i] = err; } err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { 
xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; unsigned int ring_size, ring_order; const char *what = NULL; int err; int i; char buf[16]; DPRINTK("%s\n",__FUNCTION__); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = VSCSIIF_MAX_RING_ORDER; if (ring_order > VSCSIIF_MAX_RING_ORDER) ring_order = VSCSIIF_MAX_RING_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_scsiback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = scsifront_alloc_ring(info); if (err) return err; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } /* * for compatibility with old backend * ring order = 0 write the same xenstore * key with ring order > 0 */ what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto fail; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto fail; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref-%d", i + 1); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_ref[i]); if (err) goto fail; } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto fail; /* for persistent grant */ what = "feature-persistent"; err = xenbus_printf(xbt, dev->nodename, what, "%u", 1); if (err) goto fail; err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); //offsetof(struct vscsifrnt_info, //active.segs[max_nr_segs])); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); dev_set_drvdata(&dev->dev, info); info->dev = dev; /* should be moved to backend changed, wait for backend state become to 2 */ /*err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } shadow_init(info->shadow, RING_SIZE(&info->ring));*/ init_waitqueue_head(&info->wq); init_waitqueue_head(&info->wq_sync); spin_lock_init(&info->shadow_lock); info->persistent_gnts_c = 0; info->pause = 0; info->callers = 0; info->waiting_pause = 0; init_waitqueue_head(&info->wq_pause); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; dev_err(&dev->dev, "kthread start err %d\n", err); goto free_sring; } 
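/*
 * The host limits below follow the protocol constants in common.h: up to
 * VSCSIIF_MAX_TARGET targets on a single channel and VSCSIIF_MAX_LUN LUNs,
 * with max_sectors derived from sg_tablesize (later raised by
 * scsifront_setup_host() when the backend advertises indirect segments).
 */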
host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; err = scsi_add_host(host, &dev->dev); if (err) { dev_err(&dev->dev, "fail to add scsi host %d\n", err); goto free_sring; } //xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_resume(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host *host = info->host; int err; spin_lock_irq(host->host_lock); /* finish all still pending commands */ scsifront_finish_all(info); spin_unlock_irq(host->host_lock); /* reconnect to dom0 */ scsifront_resume_free(info); err = scsifront_init_ring(info); if (err) { dev_err(&dev->dev, "fail to resume %d\n", err); scsifront_free(info); return err; } shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; xenbus_switch_state(dev, XenbusStateInitialised); return 0; } static int scsifront_suspend(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host *host = info->host; int err = 0; /* no new commands for the backend */ spin_lock_irq(host->host_lock); info->pause = 1; while (info->callers && !err) { info->waiting_pause = 1; info->waiting_sync = 0; spin_unlock_irq(host->host_lock); wake_up(&info->wq_sync); err = wait_event_interruptible(info->wq_pause, !info->waiting_pause); spin_lock_irq(host->host_lock); } spin_unlock_irq(host->host_lock); return err; } static int scsifront_suspend_cancel(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host *host = info->host; spin_lock_irq(host->host_lock); info->pause = 0; spin_unlock_irq(host->host_lock); return 0; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
*/ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } /*static void scsifront_read_backend_params(struct xenbus_device *dev, struct vscsifrnt_info *info) { unsigned int nr_segs; int ret; struct Scsi_Host *host = info->host; ret = xenbus_scanf(XBT_NIL, dev->otherend, "segs-per-req", "%u", &nr_segs); if (ret != 1) nr_segs = VSCSIIF_SG_TABLESIZE; if (!info->pause && nr_segs > host->sg_tablesize) { host->sg_tablesize = min(nr_segs, max_nr_segs); dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize); host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; } else if (info->pause && nr_segs < host->sg_tablesize) dev_warn(&dev->dev, "SG entries decreased from %d to %u - device may not work properly anymore\n", host->sg_tablesize, nr_segs); }*/ #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 #define VSCSIFRONT_OP_READD_LUN 3 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { dev_err(&dev->dev, "Device already in use.\n"); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; case VSCSIFRONT_OP_READD_LUN: if (device_state == XenbusStateConnected) xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); break; default: break; } } kfree(dir); return; } static void scsifront_setup_persistent(struct vscsifrnt_info *info) { int err; int persistent; err = xenbus_gather(XBT_NIL, info->dev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; } static void scsifront_setup_host(struct vscsifrnt_info *info) { int nr_segs; struct Scsi_Host *host = info->host; if (info->max_indirect_segments) nr_segs = info->max_indirect_segments; else nr_segs = VSCSIIF_SG_TABLESIZE; if (!info->pause && nr_segs > host->sg_tablesize) { host->sg_tablesize = nr_segs; dev_info(&info->dev->dev, "using up to %d SG entries\n", host->sg_tablesize); host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; } else if (info->pause && nr_segs < host->sg_tablesize) dev_warn(&info->dev->dev, "SG entries decreased from %d to %u - device may 
not work properly anymore\n", host->sg_tablesize, nr_segs); } static int scsifront_setup_indirect(struct vscsifrnt_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = VSCSIIF_SG_TABLESIZE; err = xenbus_gather(XBT_NIL, info->dev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (err != 1) segs = VSCSIIF_SG_TABLESIZE; if (!err && indirect_segments > VSCSIIF_SG_TABLESIZE) { info->max_indirect_segments = min(indirect_segments, MAX_VSCSI_INDIRECT_SEGMENTS); segs = info->max_indirect_segments; } scsifront_setup_host(info); printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); err = fill_grant_buffer(info, (segs + VSCSI_INDIRECT_PAGES(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = VSCSI_INDIRECT_PAGES(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * VSCSI_INDIRECT_PAGES(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; } return 0; out_of_memory: for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); int err; DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: break; case XenbusStateInitWait: if (dev->state == XenbusStateInitialised) break; err = scsifront_init_ring(info); if (err) { scsi_host_put(info->host); xenbus_dev_fatal(info->dev, err, "scsi init ring at %s", info->dev->otherend); return; } shadow_init(info->shadow, RING_SIZE(&info->ring)); xenbus_switch_state(dev, XenbusStateInitialised); break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_setup_persistent(info); err = scsifront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->dev, err, "setup_indirect at %s", info->dev->otherend); return; } } if (info->pause) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN); xenbus_switch_state(dev, XenbusStateConnected); info->pause = 0; return; } if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosed: 
if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static const struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; MODULE_ALIAS("xen:vscsi"); static DEFINE_XENBUS_DRIVER(scsifront, , .probe = scsifront_probe, .remove = scsifront_remove, .resume = scsifront_resume, .suspend = scsifront_suspend, .suspend_cancel = scsifront_suspend_cancel, .otherend_changed = scsifront_backend_changed, ); int __init scsifront_xenbus_init(void) { if (max_nr_segs > SG_ALL) max_nr_segs = SG_ALL; if (max_nr_segs < VSCSIIF_SG_TABLESIZE) max_nr_segs = VSCSIIF_SG_TABLESIZE; return xenbus_register_frontend(&scsifront_driver); } void __exit scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.13/000077500000000000000000000000001314037446600262645ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.13/common.h000066400000000000000000000100441314037446600277240ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_RING_SIZE \ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; }; #define DPRINTK(_f, _a...) \ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/3.0.13/xenbus.c000066400000000000000000000240211314037446600277330ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif extern struct scsi_host_template scsifront_sht; static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; info->ring_ref = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(sring)); if (err < 0) { free_page((unsigned long) sring); info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; DPRINTK("%u %u\n", info->ring_ref, info->evtchn); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", info->ring_ref); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); goto fail; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { 
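/*
 * Descriptive note (added): chain every shadow slot into a free list
 * (slot i points at slot i + 1) and initialise the per-request reset wait
 * queue; the final slot is terminated with the 0x0fff sentinel after the
 * loop.
 */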
info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. */ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state 
backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; static struct xenbus_driver scsifront_driver = { .name = "vscsi", .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, /* .resume = scsifront_resume, */ .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/Kbuild000066400000000000000000000001241314037446600266740ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-scsi.o xen-scsi-objs := scsifront.o xenbus.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/Makefile000066400000000000000000000000661314037446600272040ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-scsi/scsifront.c000066400000000000000000000264711314037446600277320ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free > VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = 0x0fff; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].next_free = info->shadow_free; info->shadow[id].req_scsi_cmnd = 0; info->shadow_free = id; spin_unlock_irqrestore(&info->shadow_lock, flags); } struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id) { int i; if (s->sc_data_direction == DMA_NONE) return; if (s->nr_segments) { for (i = 0; i < s->nr_segments; i++) { if (unlikely(gnttab_query_foreign_access( s->gref[i]) != 0)) { printk(KERN_ALERT "scsifront: " "grant still in use by backend.\n"); BUG(); } gnttab_end_foreign_access(s->gref[i], 0UL); } } return; } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd; if (sc == NULL) BUG(); scsifront_gnttab_done(&info->shadow[id], id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; info->shadow[id].rslt_reset = ring_res->rslt; spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } int scsifront_cmd_done(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; unsigned long flags; spin_lock_irqsave(&info->io_lock, flags); rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); } 
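/*
 * Descriptive note (added): otherwise all produced requests have been
 * consumed, so re-arm rsp_event to i + 1 so the backend will raise an
 * interrupt when the next response arrives.
 */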
else { info->ring.sring->rsp_event = i + 1; } spin_unlock_irqrestore(&info->io_lock, flags); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id) { grant_ref_t gref_head; struct page *page; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned long buffer_pfn; if (sc->sc_data_direction == DMA_NONE) return 0; err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head); if (err) { printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n"); return -ENOMEM; } if (scsi_bufflen(sc)) { /* quoted scsi_lib.c/scsi_req_map_sg . */ struct scatterlist *sg, *sgl = scsi_sglist(sc); unsigned int data_len = scsi_bufflen(sc); nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } for_each_sg (sgl, sg, scsi_sg_count(sc), i) { page = sg_page(sg); off = sg->offset; len = sg->length; buffer_pfn = page_to_phys(page) >> PAGE_SHIFT; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id, buffer_pfn, write); info->shadow[id].gref[ref_cnt] = ref; ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; ring_req->seg[ref_cnt].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } big_to_sg: gnttab_free_grant_references(gref_head); return ref_cnt; } static int scsifront_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; int ref_cnt; uint16_t rqid; if (RING_FULL(&info->ring)) { goto out_host_busy; } sc->scsi_done = done; sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].act = ring_req->act; ref_cnt = map_data_for_request(info, sc, ring_req, rqid); if (ref_cnt < 0) { add_id_to_freelist(info, rqid); if (ref_cnt == (-ENOMEM)) goto out_host_busy; else { sc->result = (DID_ERROR << 16); goto out_fail_command; } } ring_req->nr_segments = (uint8_t)ref_cnt; info->shadow[rqid].nr_segments = 
ref_cnt; scsifront_do_request(info); return 0; out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(sc); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; uint16_t rqid; int err; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; ring_req->channel = sc->device->channel; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->cmd_len = sc->cmd_len; if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); ring_req->nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/000077500000000000000000000000001314037446600253545ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.27/000077500000000000000000000000001314037446600261105ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.27/blkfront.c000066400000000000000000000647011314037446600301050ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. 
*/ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev->dev.driver_data = info; err = talk_to_backend(dev, info); if (err) { kfree(info); dev->dev.driver_data = NULL; return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } 
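/*
 * Descriptive note (added): reaching this point means the backend reported
 * state "5" (XenbusStateClosing) and the unplug timeout was not exceeded,
 * so simply release the state string read from xenstore before returning.
 */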
kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev->dev.driver_data; //struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. 
*/ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (dev) xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free > BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } int blkif_open(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users++; return 0; } int blkif_release(struct inode *inode, struct file *filep) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. 
*/ struct xenbus_device * dev = info->xbdev; enum xenbus_state state = xenbus_read_driver_state(dev->otherend); if (state == XenbusStateClosing && info->is_ready) blkfront_closing(dev); } #endif return 0; } int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct block_device *bd = inode->i_bdev; struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)req->sector; ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. 
*/ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req) && !blk_pc_request(req)) { end_request(req, 0); continue; } if (RING_FULL(&info->ring)) goto wait; DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%li) buffer:%p [%s]\n", req, req->cmd, (long long)req->sector, req->current_nr_sectors, req->nr_sectors, req->buffer, rq_data_dir(req) ? "write" : "read"); blkdev_dequeue_request(req); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); ret = __blk_end_request(req, ret, blk_rq_bytes(req)); BUG_ON(ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. 
*/ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ? GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev->dev.driver_data; return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.27/block.h000066400000000000000000000116351314037446600273610ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. 
* * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. 
*/ int users; }; extern spinlock_t blkif_io_lock; extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.27/vbd.c000066400000000000000000000265551314037446600270440ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) /*llj: the major of vitual_device_ext */ #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) elevator_init(rq, "noop"); #else elevator_init(rq, &elevator_noop); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_hardsect_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
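 * BLK_BOUNCE_ANY tells the block layer this queue can accept buffers at any
 * physical address, so requests are never copied through bounce pages.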
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? 
"enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = xendev->dev.driver_data; if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.27/vcd.c000066400000000000000000000300031314037446600270240ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define REVISION "$Revision: 1.0 $" #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk * xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk * ret_vcd = NULL; struct vcd_disk * vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void * sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->sector = 0; req->nr_sectors = 1; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command * cgc) { int ret = 0; struct page *page; size_t size; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command * vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } size = PAGE_SIZE; memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) { memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); } if (cgc->buffer && cgc->buflen) { memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); } __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: 
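 /* The shared request page is freed on both the success and error paths. */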
__free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { int ret; struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { int ret = 0; struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { int ret = -EIO; struct blkfront_info *info; info = cdi->disk->private_data; ret = submit_cdrom_cmd(info, cgc); cgc->stat = ret; return ret; } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; static int xencdrom_block_open(struct inode *inode, struct file *file) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct vcd_disk * vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); info->users = vcd->vcd_cdrom_info.use_count; spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } static int xencdrom_block_release(struct inode *inode, struct file *file) { struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; struct vcd_disk * vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { ret = cdrom_release(&vcd->vcd_cdrom_info, file); spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { info->users = 1; blkif_release(inode, file); } } return ret; } static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct blkfront_info 
*info = inode->i_bdev->bd_disk->private_data; struct vcd_disk * vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command * cgc = (struct packet_command *)arg; ret = submit_cdrom_cmd(info, cgc); } break; default: /* Not supported, augment supported above if necessary */ printk( "%s():%d Unsupported IOCTL:%x \n", __func__, __LINE__, cmd); ret = -ENOTTY; break; } spin_unlock(&vcd->vcd_cdrom_info_lock); out: return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk * vcd; struct vcd_disk * ret_vcd = NULL; int ret = 0; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); if (ret_vcd) { ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); } return ret; } static struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk * gd = info->gd; struct vcd_disk * vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) { goto out; } /* Create new vcd_disk and fill in cdrom_info */ vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); if (!vcd) { printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); vcd->vcd_cdrom_info.mask = ( CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } xencdrom_bdops.owner = gd->fops->owner; gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk * gd = info->gd; struct vcd_disk * vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } 
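/*
 * A minimal illustrative userspace sketch (assumption: the guest exposes the
 * virtual CD as /dev/xvdd; adjust for the actual disk configuration). The vcd
 * layer above registers the virtual disk with the cdrom core and forwards
 * tray, lock and packet commands to the backend, so the device answers the
 * standard Linux CDROM ioctls used below.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cdrom.h>

int main(void)
{
	/* O_NONBLOCK lets the open succeed even when no medium is loaded. */
	int fd = open("/dev/xvdd", O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Served by xencdrom_block_ioctl(): capability mask minus the masked bits. */
	int caps = ioctl(fd, CDROM_GET_CAPABILITY, 0);
	if (caps >= 0)
		printf("capabilities: 0x%x\n", caps);

	/* Translated into a GPCMD_START_STOP_UNIT packet and sent to the backend. */
	if (ioctl(fd, CDROMEJECT, 0) < 0)
		perror("CDROMEJECT");

	close(fd);
	return 0;
}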
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.32/000077500000000000000000000000001314037446600261045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.32/blkfront.c000066400000000000000000000650271314037446600301030ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct xenbus_device *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static void blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. 
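 * The device id comes from the "virtual-device" xenstore key (or
 * "virtual-device-ext" for the extended minor range), the shadow request
 * free list is seeded, and talk_to_backend() publishes the ring reference
 * and event channel before the state switch.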
*/ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
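 * The resulting ring-ref, event-channel and protocol values are then written
 * to the frontend's xenstore directory inside a transaction, which is simply
 * restarted if it completes with -EAGAIN.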
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } 
kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); //struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (XENBUS_EXIST_ERR(err)) return; printk(KERN_INFO "Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); //revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%lu", &info->feature_barrier, NULL); if (err) info->feature_barrier = 0; err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); unsigned long flags; DPRINTK("blkfront_closing: %s removed\n", dev->nodename); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. 
Must be done with no locks held. */ flush_scheduled_work(); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (dev) xenbus_frontend_closed(dev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); kfree(info); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = 0; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; info->users++; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; info->users--; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd 
&& (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = (unsigned long)req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; if (blk_barrier_rq(req)) ring_req->operation = BLKIF_OP_WRITE_BARRIER; if (blk_pc_request(req)) ring_req->operation = BLKIF_OP_PACKET; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. */ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if (!blk_fs_request(req) && !blk_pc_request(req)) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? 
"write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = (struct request *)info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk("blkfront: %s: write barrier op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_barrier = 0; xlvbd_barrier(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static void blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); memcpy(copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (copy[i].request == 0) continue; /* Grab a request slot and copy shadow state into it. 
*/ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir((struct request *) info->shadow[req->id].request) ? GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.32/block.h000066400000000000000000000121741314037446600273540ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #define DPRINTK_IOCTL(_f, _a...) ((void)0) struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; }; struct blk_shadow { blkif_request_t req; unsigned long request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; int feature_barrier; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; extern spinlock_t blkif_io_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
*/ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); int xlvbd_barrier(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.32/vbd.c000066400000000000000000000267031314037446600270330ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) /*llj: the major of vitual_device_ext */ #define EXT_MAJOR 202 #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; int do_register, i; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major EXT_MAJOR, * don't try to register it again */ for (i = XLBD_MAJOR_VBD_START; i < XLBD_MAJOR_VBD_START+ NUM_VBD_MAJORS; i++ ) { if (major_info[i] != NULL && major_info[i]->major == ptr->major) { do_register = 0; break; } } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(ptr); return NULL; } printk("xen-vbd: registered block device major %i\n", ptr->major); } major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = EXT_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; gd = alloc_disk(nr_minors); if (gd == NULL) goto out; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto out; } info->rq = gd->queue; info->gd = gd; if (info->feature_barrier) xlvbd_barrier(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int xlvbd_barrier(struct blkfront_info *info) { int err; err = blk_queue_ordered(info->rq, info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, info->feature_barrier ? 
"enabled" : "disabled"); return 0; } #else int xlvbd_barrier(struct blkfront_info *info) { printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name); return -ENOSYS; } #endif #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/2.6.32/vcd.c000066400000000000000000000313671314037446600270360ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define REVISION "$Revision: 1.0 $" #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->__data_len = PAGE_SIZE; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; size_t size; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { printk(KERN_WARNING "%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } size = PAGE_SIZE; memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) { memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); } if (cgc->buffer && cgc->buflen) { memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); } __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: 
__free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { int ret; struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { int ret = 0; struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { int ret = -EIO; struct blkfront_info *info; info = cdi->disk->private_data; ret = submit_cdrom_cmd(info, cgc); cgc->stat = ret; return ret; } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO); if (!page) { printk(KERN_CRIT "%s() Unable to allocate page\n", __func__); return -ENOMEM; } memset(page_address(page), 0, PAGE_SIZE); sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif info->users = vcd->vcd_cdrom_info.use_count; spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info 
= gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { info->users = 1; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: ret = submit_cdrom_cmd(info, (struct packet_command *)arg); break; default: /* Not supported, augment supported above if necessary */ printk("%s():%d Unsupported IOCTL:%x \n", __func__, __LINE__, cmd); ret = -ENOTTY; break; } spin_unlock(&vcd->vcd_cdrom_info_lock); out: return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; int ret = 0; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); if (ret_vcd) { ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); } return ret; } static struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) { goto out; } /* Create new vcd_disk and fill in cdrom_info */ vcd = (struct vcd_disk *)kzalloc(sizeof(struct vcd_disk), GFP_KERNEL); if (!vcd) { printk(KERN_INFO "%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { printk(KERN_WARNING "%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } xencdrom_bdops.owner = gd->fops->owner; gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; 
err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.101/000077500000000000000000000000001314037446600261545ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.101/blkfront.c000066400000000000000000001564551314037446600301610ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static void blkif_completion(struct blk_shadow *, struct blkfront_info *info, struct blkif_response *bret); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static int blkfront_setup_indirect(struct blkfront_info *info); static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while (i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. 
*/ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; int i; int err; for (i=0; iring_size; i++) { if (gnttab_query_foreign_access(info->ring_refs[i])) return 0; } blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? 
ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_printf(xbt, dev->nodename, what, "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: printk("talk_to_backend:blkif_recover old_ring_size =%d.ring_size=%d\n", old_ring_size,ring_size); err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? 
vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { int err; char *type; unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); if (IS_ERR(type)) return; info->feature_secdiscard = 0; if (strncmp(type, "phy", 3) == 0) { err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL); if (!err) { info->feature_discard = 1; info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure); if (err == 1) info->feature_secdiscard = discard_secure; } else if (strncmp(type, "file", 4) == 0) info->feature_discard = 1; kfree(type); } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err, barrier, flush, discard; int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%lu", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: /* * If we are recovering from suspension, we need to wait * for the backend to announce it's features before * reconnecting, at least we need to know if the backend * supports indirect descriptors, and how many. */ return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. 
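 *
 * On 2.6.37 and newer kernels that preference is expressed through the
 * REQ_FLUSH/REQ_FUA queue flags and mapped to BLKIF_OP_WRITE_BARRIER, or to
 * BLKIF_OP_FLUSH_DISKCACHE when the backend also advertises
 * "feature-flush-cache"; on older kernels the same information is encoded
 * as a QUEUE_ORDERED_* mode instead.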
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? 
bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } return 0; out_of_memory: for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.u.rw.id != id) return -EINVAL; if (!info->shadow[id].request) 
return -ENXIO; info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->u.rw.id = GET_ID_FROM_FREELIST(info); info->shadow[req->u.rw.id] = ent->copy; info->shadow[req->u.rw.id].req.u.rw.id = req->u.rw.id; /* Rewrite any grant references invalidated by susp/resume. */ for (i = 0; i < req->u.rw.nr_segments; i++) { gnttab_grant_foreign_access_ref(req->u.rw.seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.grants_used[i]->pfn), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. 
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } info->users++; return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; struct block_device *bd = bdget_disk(disk, 0); info->users--; bdput(bd); if (bd->bd_openers) return 0; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) \ && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) \ || LINUX_VERSION_CODE < KERNEL_VERSION(3,0,18)) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. * * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; struct blkif_request_segment_aligned *segments = NULL; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ bool new_persistent_gnts; grant_ref_t gref_head; struct grant *gnt_list_entry = NULL; struct scatterlist *sg; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; ring_req->u.rw.nr_segments = nseg; } if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { /* id, sector_number and handle are set above. */ ring_req->operation = BLKIF_OP_DISCARD; ring_req->u.discard.flag = 0; ring_req->u.discard.nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; } else { for_each_sg(info->shadow[id].sg, sg, nseg, i) { fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_IRQ0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_IRQ0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); bvec_data = kmap_atomic(sg_page(sg), KM_USER1); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); kunmap_atomic(bvec_data, KM_USER1); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } } if (segments) kunmap_atomic(segments, KM_IRQ0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
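 * The shadow entry records everything needed to rebuild this request on a
 * fresh ring after a migration or backend restart: the blkif request, the
 * originating struct request, the scatterlist and the grants used for each
 * segment (blkif_recover() copies live entries onto info->resume_list and
 * kick_pending_request_queues() replays them).  The request id doubles as
 * the index into info->shadow[]; free slots are chained through
 * req.u.rw.id with info->shadow_free as the head of the free list.
 * A minimal sketch of that free-list walk (illustrative only):
 *
 *   id = info->shadow_free;                        // take the head slot
 *   info->shadow_free = shadow[id].req.u.rw.id;    // unlink it
 *   ...                                            // use slot 'id'
 *   shadow[id].req.u.rw.id = info->shadow_free;    // push it back
 *   info->shadow_free = id;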
*/ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); /*if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; }*/ if (req->cmd_type != REQ_TYPE_FS) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } ret = bret->status == BLKIF_RSP_OKAY ? 
0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; if (s->req.operation == BLKIF_OP_DISCARD) return; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ for_each_sg(s->sg, sg, nseg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); bvec_data = kmap_atomic(sg_page(sg), KM_USER1); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); kunmap_atomic(bvec_data, KM_USER1); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. 
*/ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. * This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio, split_bio->err); kfree(split_bio); } bio_put(bio); } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct request *req, *n; unsigned int segs; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int offset; int pending, size; struct split_bio *split_bio; struct list_head requests; struct blk_resume_entry *ent = NULL; LIST_HEAD(list); //WARN_ON(1); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { /* Not in use? */ if (!info->shadow[i].request) continue; ent = kmalloc(sizeof(*ent), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. 
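* The whole shadow ring is zeroed and re-initialised for the (possibly resized) new ring; the in-flight entries copied in stage 1 now sit on info->resume_list and have their bios resubmitted in stage 3.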
*/ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { while (!list_empty(&info->resume_list)) { ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs); bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); merge_bio.head = ent->copy.request->bio; merge_bio.tail = ent->copy.request->biotail; bio_list_merge(&bio_list, &merge_bio); ent->copy.request->bio = NULL; //blk_put_request(ent->copy.request); blk_end_request_all(ent->copy.request, 0); __list_del_entry(&ent->list); kfree(ent); } /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. */ spin_lock_irq(&info->io_lock); while ((req = blk_fetch_request(info->rq)) != NULL) { merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) pr_alert("diskcache flush request found!\n"); __blk_end_request_all(req, 0); } spin_unlock_irq(&info->io_lock); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&info->io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. 
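* split_bio->pending holds the number of outstanding clones; split_bio_end() decrements it and completes the original bio only once the count reaches zero.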
*/ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_INFO "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.101/block.h000066400000000000000000000150511314037446600274210ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #define DPRINTK_IOCTL(_f, _a...) 
((void)0) struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max_seg, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max_seg, "Maximum amount of segments in indirect requests (default is 256 (1M))"); #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int max_indirect_segments; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
*/ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.101/vbd.c000066400000000000000000000372111314037446600270770ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, struct blkfront_info *info) { struct request_queue *rq; unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_hw_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
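* BLK_BOUNCE_ANY tells the block layer that pages anywhere in memory are acceptable for I/O, so no bounce buffering is performed.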
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s; %s %s %s %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? 
"flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled", "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.101/vcd.c000066400000000000000000000306721314037446600271040ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct 
cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); 
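/* Remove the entry from the global vcd_disks list (vcd_disks_lock is held here), then drop its per-device lock and free it. */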
list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.13/000077500000000000000000000000001314037446600260765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.13/blkfront.c000066400000000000000000000724551314037446600301000ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *); static void blkif_completion(struct blk_shadow *); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); /** * Entry point to this code when a new device is created. 
Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice, i; struct blkfront_info *info; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); printk(KERN_INFO "blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); err = talk_to_backend(dev, info); if (info->connected == BLKIF_STATE_SUSPENDED && !err) err = blkif_recover(info); return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { const char *message = NULL; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
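* setup_blkring() grants the backend access to the shared ring page and binds an IRQ to a fresh event channel; the resulting ring-ref and event-channel are then advertised to the backend via the xenstore transaction below.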
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } err = xenbus_printf(xbt, dev->nodename, "ring-ref","%u", info->ring_ref); if (err) { message = "writing ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { message = "writing protocol"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (message) xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; info->ring_ref = GRANT_INVALID_REF; sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); if (err < 0) { free_page((unsigned long)sring); info->ring.sring = NULL; goto fail; } info->ring_ref = err; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; 
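/* Note: "5" is XenbusStateClosing; we give up when the backend is not yet Closing or the unplug wait has timed out. */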
} kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); //struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateConnected: connect(info); break; case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ /* * Invoked when the backend is finally 'ready' (and has told us * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err, barrier, flush; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%lu", &sector_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests.
*/ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); if(info->users == 0) kfree(info); else info->xbdev = NULL; return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= BLK_RING_SIZE); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline void ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. 
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; if (!info->xbdev) return -ENODEV; info->users++; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; info->users--; #if 0 if (info->users == 0) { /* Check whether we have been instructed to close. We will have ignored this request initially, as the device was still mounted. */ struct xenbus_device * dev = info->xbdev; if (!dev) { blkfront_closing(info); kfree(info); } else if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosing && info->is_ready) blkfront_closing(info); } #endif return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #else return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. * * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; if (gnttab_alloc_grant_references( BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, BLKIF_MAX_SEGMENTS_PER_REQUEST); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->handle = info->handle; ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); for_each_sg(info->sg, sg, ring_req->nr_segments, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); ring_req->seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
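On resume, blkif_recover() walks this shadow copy, re-grants every data page and pushes the saved request back onto the new ring, which is why the full blkif_request_t and the page frames are preserved. A minimal sketch of what struct blk_shadow (see block.h) keeps per in-flight request, using the field names from this driver:

    shadow[id].req       // the blkif_request_t exactly as it was sent
    shadow[id].request   // the originating struct request from the block layer
    shadow[id].frame[i]  // pfn of each granted segment, needed for re-granting

For reference, each segment descriptor addresses 512-byte sectors inside one granted page: e.g. sg->offset = 1024 and sg->length = 2048 yield fsect = 2 and lsect = 5.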
*/ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if ((req->cmd_type != REQ_TYPE_FS && req->cmd_type != REQ_TYPE_BLOCK_PC) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id]); ADD_ID_TO_FREELIST(info, id); ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { const char *what; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: what = bret->operation == BLKIF_OP_WRITE_BARRIER ? "write barrier" : "flush disk cache"; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { pr_warn("blkfront: %s: %s op failed\n", what, info->gd->disk_name); ret = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.nr_segments == 0)) { pr_warn("blkfront: %s: empty %s op failed\n", what, info->gd->disk_name); ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) ret = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev data " "request: %x\n", bret->status); __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. 
Must be done with no locks held. */ flush_work_sync(&info->work); /* Free resources associated with old device channel. */ if (info->ring_ref != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref, (unsigned long)info->ring.sring); info->ring_ref = GRANT_INVALID_REF; info->ring.sring = NULL; } if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s) { int i; for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); } static int blkif_recover(struct blkfront_info *info) { int i; blkif_request_t *req; struct blk_shadow *copy; int j; /* Stage 1: Make a safe copy of the shadow state. */ copy = kmemdup(info->shadow, sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!copy) return -ENOMEM; /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); for (i = 0; i < BLK_RING_SIZE; i++) info->shadow[i].req.id = i+1; info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ if (!copy[i].request) continue; /* Grab a request slot and copy shadow state into it. */ req = RING_GET_REQUEST( &info->ring, info->ring.req_prod_pvt); *req = copy[i].req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); /* Rewrite any grant references invalidated by susp/resume. */ for (j = 0; j < req->nr_segments; j++) gnttab_grant_foreign_access_ref( req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), rq_data_dir(info->shadow[req->id].request) ? GTF_readonly : 0); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; } kfree(copy); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static struct xenbus_driver blkfront = { .name = "vbd", .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront); } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.13/block.h000066400000000000000000000123771314037446600273530ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. 
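It declares struct blkfront_info (one per vbd), the struct blk_shadow bookkeeping used to reissue in-flight requests after suspend/resume, and the blkif_open/blkif_release/blkif_ioctl/blkif_getgeo entry points shared by blkfront.c, vbd.c and vcd.c.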
* * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_type_info { int partn_shift; int disks_per_major; char *devname; char *diskname; }; struct xlbd_major_info { int major; int index; int usage; struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct blk_shadow { blkif_request_t req; struct request *request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; int connected; int ring_ref; blkif_front_ring_t ring; struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. 
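A minimal sketch of the accounting, using the functions declared below:

    blkif_open()     ->  info->users++   (fails with -ENODEV once the xenbus device is gone)
    blkif_release()  ->  info->users--
    hot-unplug       ->  proceeds only once users has dropped back to 0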
*/ int users; }; extern spinlock_t blkif_io_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.13/vbd.c000066400000000000000000000332151314037446600270210ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; DEFINE_SPINLOCK(blkif_io_lock); static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SCSI_RANGE: ptr->type = &xlbd_scsi_type; ptr->index = index - XLBD_MAJOR_SCSI_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major 202, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = 10; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = 11 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = 18 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = 26; break; default: if (!VDEV_IS_EXTENDED(vdevice)) index = 27; else index = 28; break; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { for (; minor < end; ++minor) __set_bit(minor, ms->bitmap); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; BUG_ON(end > ms->nr); spin_lock(&ms->lock); for (; minor < end; ++minor) __clear_bit(minor, ms->bitmap); spin_unlock(&ms->lock); } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_hw_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
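(For reference: the limits set above keep any merged request within a single ring slot. Each segment is bounded to one aligned page, and a request carries at most BLKIF_MAX_SEGMENTS_PER_REQUEST segments, which is 11 in the classic blkif ABI, so on 4 KiB pages a request moves at most 11 * 4096 bytes = 44 KiB, i.e. 88 sectors, regardless of the 512-sector cap passed to blk_queue_max_hw_sectors(). Illustrative arithmetic only.)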
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = 202; minor = BLKIF_MINOR_EXT(vdevice); } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if (!(vdisk_info & VDISK_CDROM) && (minor & ((1 << mi->type->partn_shift) - 1)) == 0) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor, nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(nr_minors); if (gd == NULL) goto release; offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (nr_minors > 1 || (vdisk_info & VDISK_CDROM)) { if (offset < 26) { sprintf(gd->disk_name, "%s%c", mi->type->diskname, 'a' + offset ); } else { sprintf(gd->disk_name, "%s%c%c", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26) ); } } else { if (offset < 26) { sprintf(gd->disk_name, "%s%c%d", mi->type->diskname, 'a' + offset, minor & ((1 << mi->type->partn_shift) - 1)); } else { sprintf(gd->disk_name, "%s%c%c%d", mi->type->diskname, 'a' + ((offset/26)-1), 'a' + (offset%26), minor & ((1 << mi->type->partn_shift) - 1)); } } gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size)) { del_gendisk(gd); goto release; } info->rq = gd->queue; info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = info->gd->minors; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor, nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&blkif_io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? "flush diskcache" : "barrier or flush"), info->feature_flush ? 
"enabled" : "disabled"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.13/vcd.c000066400000000000000000000311651314037446600270240ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define REVISION "$Revision: 1.0 $" #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->__data_len = PAGE_SIZE; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; size_t size; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } size = PAGE_SIZE; sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) { memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); } if (cgc->buffer && cgc->buflen) { memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); } __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void 
xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { int ret; struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { int ret = 0; struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; ret = submit_cdrom_cmd(info, &cgc); return ret; } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { int ret = -EIO; struct blkfront_info *info; info = cdi->disk->private_data; ret = submit_cdrom_cmd(info, cgc); cgc->stat = ret; return ret; } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif info->users = vcd->vcd_cdrom_info.use_count; spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = 
xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { info->users = 1; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: ret = submit_cdrom_cmd(info, (struct packet_command *)arg); break; default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; int ret = 0; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); if (ret_vcd) { ret = cdrom_media_changed(&ret_vcd->vcd_cdrom_info); } return ret; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) { goto out; } /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strcpy(vcd->vcd_cdrom_info.name, gd->disk_name); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk 
*vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.76/000077500000000000000000000000001314037446600261075ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.76/blkfront.c000066400000000000000000001621051314037446600301010ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static void blkif_completion(struct blk_shadow *, struct blkfront_info *info, struct blkif_response *bret); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static int blkfront_setup_indirect(struct blkfront_info *info); static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while (i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. 
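The check below reads the device-type key from the frontend's own xenstore directory; for a typical virtual disk that key lives at a path of the form (illustrative, domid and device id vary):

    /local/domain/<domid>/device/vbd/<devid>/device-type = "disk" or "cdrom"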
*/ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; int i; int err; //printk("[INTO]----->blkfront_resume: %s\n", dev->nodename); for (i=0; iring_size; i++) { if (gnttab_query_foreign_access(info->ring_refs[i])) return 0; } blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; //printk("[EXIT]<-----blkfront_resume: %s\n", dev->nodename); err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. 
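A sketch of the frontend keys written in the transaction below (values are examples; the multi-page keys are only used when more than one ring page was negotiated):

    ring-ref           = <gref>            // single-page ring
    ring-page-order    = <order>           // multi-page ring, plus:
    num-ring-pages     = <1 << order>
    ring-ref0..N       = <one gref per ring page>
    event-channel      = <local port>
    protocol           = XEN_IO_PROTO_ABI_NATIVE
    feature-persistent = 1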
*/ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_printf(xbt, dev->nodename, what, "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: printk("talk_to_backend:blkif_recover old_ring_size =%d.ring_size=%d\n", old_ring_size,ring_size); err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = 
GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. 
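A sketch of how the backend states are handled below:

    XenbusStateInitWait    -> talk_to_backend()
    XenbusStateConnected   -> connect()
    XenbusStateClosing     -> spawn the xenwatch_unplugdisk_callback thread
    XenbusStateClosed      -> ignored if we are already Closed, otherwise treated as Closing
    all other states       -> no action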
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); //struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { int err; char *type; unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); if (IS_ERR(type)) return; info->feature_secdiscard = 0; if (strncmp(type, "phy", 3) == 0) { err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL); if (!err) { info->feature_discard = 1; info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure); if (err == 1) info->feature_secdiscard = discard_secure; } else if (strncmp(type, "file", 4) == 0) info->feature_discard = 1; kfree(type); } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err, barrier, flush, discard; int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%lu", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: /* * If we are recovering from suspension, we need to wait * for the backend to announce it's features before * reconnecting, at least we need to know if the backend * supports indirect descriptors, and how many. */ //printk("[connect]:blkif_recover info->ring_size =%d.\n", info->ring_size); //blkif_recover(info, info->ring_size, info->ring_size); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. 
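(For reference, the result of the negotiation implemented below on 2.6.37+ kernels: neither flag present -> feature_flush stays 0; feature-barrier -> REQ_FLUSH|REQ_FUA using BLKIF_OP_WRITE_BARRIER; feature-flush-cache -> REQ_FLUSH using BLKIF_OP_FLUSH_DISKCACHE, which takes precedence over the barrier path. Illustrative summary only.)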
* * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? 
bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); /*info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL); if (info->sg == NULL) goto out_of_memory; sg_init_table(info->sg, segs);*/ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } return 0; out_of_memory: //kfree(info->sg); //info->sg = NULL; for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } //int is_recovered = 0; static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* 
debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.u.rw.id != id) return -EINVAL; if (!info->shadow[id].request) return -ENXIO; info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->u.rw.id = GET_ID_FROM_FREELIST(info); info->shadow[req->u.rw.id] = ent->copy; info->shadow[req->u.rw.id].req.u.rw.id = req->u.rw.id; /* Rewrite any grant references invalidated by susp/resume. */ for (i = 0; i < req->u.rw.nr_segments; i++) { gnttab_grant_foreign_access_ref(req->u.rw.seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.grants_used[i]->pfn), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. 
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } info->users++; return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; //struct xenbus_device *xbdev; struct block_device *bd = bdget_disk(disk, 0); info->users--; bdput(bd); if (bd->bd_openers) return 0; return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) \ && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) \ || LINUX_VERSION_CODE < KERNEL_VERSION(3,0,18)) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. * * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; struct blkif_request_segment_aligned *segments = NULL; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ bool new_persistent_gnts; grant_ref_t gref_head; struct grant *gnt_list_entry = NULL; struct scatterlist *sg; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; ring_req->u.rw.nr_segments = nseg; } if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { /* id, sector_number and handle are set above. */ ring_req->operation = BLKIF_OP_DISCARD; ring_req->u.discard.flag = 0; ring_req->u.discard.nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; } else { for_each_sg(info->shadow[id].sg, sg, nseg, i) { fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_USER0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } } if (segments) kunmap_atomic(segments, KM_USER0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
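 * blkif_recover() copies any shadow entry that still has a request attached onto
 * info->resume_list after a suspend/resume, so the request can be rebuilt and resubmitted.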
*/ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); /*if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; }*/ if (req->cmd_type != REQ_TYPE_FS) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } ret = bret->status == BLKIF_RSP_OKAY ? 
0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; if (s->req.operation == BLKIF_OP_DISCARD) return; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ for_each_sg(s->sg, sg, nseg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. 
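 * Grants the backend no longer maps are handled in the else branch below: their foreign
 * access is ended and they go to the tail of info->grants, so still-mapped (persistent)
 * grants are always picked first.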
*/ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. * This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio, split_bio->err); kfree(split_bio); } bio_put(bio); } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct request *req, *n; unsigned int segs; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int offset; int pending, size; struct split_bio *split_bio; struct list_head requests; struct blk_resume_entry *ent = NULL; LIST_HEAD(list); //WARN_ON(1); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { /* Not in use? 
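 * Only shadow slots with an outstanding request need to be copied for replay.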
*/ if (!info->shadow[i].request) continue; ent = kmalloc(sizeof(*ent), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. */ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { while (!list_empty(&info->resume_list)) { ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs); bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); merge_bio.head = ent->copy.request->bio; merge_bio.tail = ent->copy.request->biotail; bio_list_merge(&bio_list, &merge_bio); ent->copy.request->bio = NULL; //blk_put_request(ent->copy.request); blk_end_request_all(ent->copy.request, 0); __list_del_entry(&ent->list); kfree(ent); } /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. */ spin_lock_irq(&info->io_lock); while ((req = blk_fetch_request(info->rq)) != NULL) { merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) pr_alert("diskcache flush request found!\n"); //__blk_put_request(info->rq, req); __blk_end_request_all(req, 0); } spin_unlock_irq(&info->io_lock); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ //kick_pending_request_queues(info); if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&info->io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. 
*/ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. */ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_INFO "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.76/block.h000066400000000000000000000152161314037446600273570ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include //#include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #define DPRINTK_IOCTL(_f, _a...) ((void)0) struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max_seg, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max_seg, "Maximum amount of segments in indirect requests (default is 256 (1M))"); #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; /* move sg to blk_shadow struct */ //struct scatterlist *sg; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int max_indirect_segments; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. 
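 * Incremented in blkif_open() and decremented in blkif_release();
 * xenwatch_unplugdisk_callback() polls it before closing the device.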
*/ int users; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. */ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.76/vbd.c000066400000000000000000000372111314037446600270320ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, struct blkfront_info *info) { struct request_queue *rq; unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_hw_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
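 * (BLK_BOUNCE_ANY tells the block layer that this virtual device can address pages in
 * any memory zone, so no bounce buffering is needed.)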
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s; %s %s %s %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? 
"flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled", "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/3.0.76/vcd.c000066400000000000000000000306721314037446600270370ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct 
cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); 
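/* Drop this disk's entry from the global vcd list before freeing its state. */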
list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/Kbuild000066400000000000000000000001231314037446600265050ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o vcd.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vbd/Makefile000066400000000000000000000000661314037446600270160ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/000077500000000000000000000000001314037446600255435ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/000077500000000000000000000000001314037446600262775ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/accel.c000066400000000000000000000535621314037446600275250ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) \ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. 
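 * Entries are added both when a vif's configuration requests a plugin by name and when a plugin module announces itself via netfront_accelerator_loaded().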
*/ static struct list_head accelerators_list; /* Lock to protect access to accelerators_list */ static spinlock_t accelerators_lock; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); spin_lock_init(&accelerators_lock); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; unsigned long flags; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); spin_lock_irqsave(&accelerators_lock, flags); list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } spin_unlock_irqrestore(&accelerators_lock, flags); } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* Check we're not trying to overwrite an existing watch */ BUG_ON(np->accel_vif_state.accel_watch.node != NULL); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of 
watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { unsigned long flags; /* Need lock to write list */ spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } /* * Initialise the state to track an accelerator plugin module. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); unsigned long flags; int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; spin_lock_irqsave(&accelerators_lock, flags); list_add(&accelerator->link, &accelerators_list); spin_unlock_irqrestore(&accelerators_lock, flags); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. 
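 * The vif's data path (NAPI and the netdev tx lock) is quiesced while the hooks pointer is updated, so it is never observed half-set.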
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { /* This function must be called with the vif_states_lock held */ DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); vif_state->hooks = vif_state->np->accelerator->hooks; netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; unsigned long flags; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks) { if (hooks->new_device(np->netdev, dev) == 0) { spin_lock_irqsave (&accelerator->vif_states_lock, flags); accelerator_set_vif_state_hooks(&np->accel_vif_state); spin_unlock_irqrestore (&accelerator->vif_states_lock, flags); } } return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. Must be called with accelerator_mutex held */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* * currently hold accelerator_mutex, so don't need * vif_states_lock to read the list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) { spin_lock_irqsave (&accelerator->vif_states_lock, flags); accelerator_set_vif_state_hooks(vif_state); spin_unlock_irqrestore (&accelerator->vif_states_lock, flags); } } } /* * Called by the netfront accelerator plugin module when it has loaded */ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. 
They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held so don't need vif_states_lock to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if(vif_state->hooks) { hooks = vif_state->hooks; /* Last chance to get statistics from the accelerator */ hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } else { spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. 
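 * The struct netfront_accelerator entry itself stays on accelerators_list so a later reload can reuse it; only its hooks are cleared.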
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; unsigned long flags; mutex_lock(&accelerator_mutex); spin_lock_irqsave(&accelerators_lock, flags); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { spin_unlock_irqrestore(&accelerators_lock, flags); accelerator_remove_hooks(accelerator); goto out; } } spin_unlock_irqrestore(&accelerators_lock, flags); out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* Helper for call_remove and do_suspend */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev, unsigned long *lock_flags) { struct netfront_accelerator *accelerator = np->accelerator; struct netfront_accel_hooks *hooks; int rc = 0; if (np->accel_vif_state.hooks) { hooks = np->accel_vif_state.hooks; /* Last chance to get statistics from the accelerator */ hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, *lock_flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); spin_lock_irqsave(&accelerator->vif_states_lock, *lock_flags); } return rc; } static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; unsigned long flags; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev, &flags); np->accelerator = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); return rc; } int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { unsigned long flags; int rc = 0; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. 
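 * On suspend_cancel the re-registered xenbus watch fires and re-attaches this same accelerator.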
*/ spin_lock_irqsave(&np->accelerator->vif_states_lock, flags); rc = do_remove(np, dev, &flags); spin_unlock_irqrestore(&np->accelerator->vif_states_lock, flags); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { /* * Setting the watch will cause it to fire and probe the * accelerator, so no need to call accelerator_probe_new_vif() * directly here */ if (dev->state == XenbusStateConnected) netfront_accelerator_add_watch(np); return 0; } void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; spinlock_t *vif_states_lock; unsigned long flags; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); vif_states_lock = &np->accelerator->vif_states_lock; spin_lock_irqsave(vif_states_lock, flags); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; spin_unlock_irqrestore(vif_states_lock, flags); break; } } out: mutex_unlock(&accelerator_mutex); return; } int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_hooks *hooks; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. 
*/ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); hooks = np->accel_vif_state.hooks; if (hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/acpi_bus.h000066400000000000000000000263361314037446600302470ustar00rootroot00000000000000/* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ #include #include #define PREFIX "ACPI: " /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; acpi_handle handles[ACPI_MAX_HANDLES]; }; /* acpi_utils.h */ acpi_status acpi_extract_package(union acpi_object *package, struct acpi_buffer *format, struct acpi_buffer *buffer); acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); acpi_status acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); #ifdef CONFIG_ACPI #include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; enum acpi_bus_removal_type { ACPI_BUS_REMOVAL_NORMAL = 0, ACPI_BUS_REMOVAL_EJECT, ACPI_BUS_REMOVAL_SUPRISE, ACPI_BUS_REMOVAL_TYPE_COUNT }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_SYSTEM, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT }; struct acpi_driver; struct acpi_device; /* * ACPI Driver * ----------- */ typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_lock) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_stop) (struct acpi_device * device, int type); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_scan) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef int (*acpi_op_shutdown) (struct acpi_device * device); struct acpi_bus_ops { u32 acpi_op_add:1; u32 acpi_op_remove:1; u32 acpi_op_lock:1; u32 acpi_op_start:1; u32 acpi_op_stop:1; u32 acpi_op_suspend:1; 
u32 acpi_op_resume:1; u32 acpi_op_scan:1; u32 acpi_op_bind:1; u32 acpi_op_unbind:1; u32 acpi_op_shutdown:1; u32 reserved:21; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_lock lock; acpi_op_start start; acpi_op_stop stop; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_scan scan; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_shutdown shutdown; }; struct acpi_driver { char name[80]; char class[80]; const struct acpi_device_id *ids; /* Supported Hardware IDs */ struct acpi_device_ops ops; struct device_driver drv; struct module *owner; }; /* * ACPI Device * ----------- */ /* Status (_STA) */ struct acpi_device_status { u32 present:1; u32 enabled:1; u32 show_in_ui:1; u32 functional:1; u32 battery_present:1; u32 reserved:27; }; /* Flags */ struct acpi_device_flags { u32 dynamic_status:1; u32 hardware_id:1; u32 compatible_ids:1; u32 bus_address:1; u32 unique_id:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? */ u32 force_power_state:1; u32 reserved:19; }; /* File System */ struct acpi_device_dir { struct proc_dir_entry *entry; }; #define acpi_device_dir(d) ((d)->dir.entry) /* Plug and Play */ typedef char acpi_bus_id[5]; typedef unsigned long acpi_bus_address; typedef char acpi_hardware_id[15]; typedef char acpi_unique_id[9]; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ acpi_hardware_id hardware_id; /* _HID */ struct acpi_compatible_id_list *cid_list; /* _CIDs */ acpi_unique_id unique_id; /* _UID */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) #define acpi_device_hid(d) ((d)->pnp.hardware_id) #define acpi_device_uid(d) ((d)->pnp.unique_id) #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) /* Power Management */ struct acpi_device_power_flags { u32 explicit_get:1; /* _PSC present? */ u32 power_resources:1; /* Power resources */ u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 reserved:28; }; struct acpi_device_power_state { struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ u8 reserved:6; } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ struct acpi_handle_list resources; /* Power resources referenced */ }; struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ }; /* Performance Management */ struct acpi_device_perf_flags { u8 reserved:8; }; struct acpi_device_perf_state { struct { u8 valid:1; u8 reserved:7; } flags; u8 power; /* % Power (compared to P0) */ u8 performance; /* % Performance ( " ) */ int latency; /* Px->P0 time (microseconds) */ }; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? */ u8 prepared:1; /* Has the wake-up capability been enabled? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ }; struct acpi_device_wakeup_state { u8 enabled:1; }; struct acpi_device_wakeup { acpi_handle gpe_device; acpi_integer gpe_number; acpi_integer sleep_state; struct acpi_handle_list resources; struct acpi_device_wakeup_state state; struct acpi_device_wakeup_flags flags; }; /* Device */ struct acpi_device { acpi_handle handle; struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct list_head g_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_ops ops; struct acpi_driver *driver; void *driver_data; struct device dev; struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */ enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ }; #define acpi_driver_data(d) ((d)->driver_data) #define to_acpi_device(d) container_of(d, struct acpi_device, dev) #define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; /* * Events * ------ */ struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, u32, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); extern int register_acpi_bus_notifier(struct notifier_block *nb); extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_receive_event(struct acpi_bus_event *event); #else static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) { return 0; } #endif int acpi_bus_register_driver(struct acpi_driver *driver); void acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ #include struct acpi_bus_type { struct list_head list; struct bus_type *bus; /* For general devices under the bus */ int (*find_device) (struct device *, 
acpi_handle *); /* For bridges, such as PCI root bridge, IDE controller */ int (*find_bridge) (struct device *, acpi_handle *); }; int register_acpi_bus_type(struct acpi_bus_type *); int unregister_acpi_bus_type(struct acpi_bus_type *); struct device *acpi_get_physical_device(acpi_handle); struct device *acpi_get_physical_pci_device(acpi_handle); /* helper */ acpi_handle acpi_get_child(acpi_handle, acpi_integer); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) #ifdef CONFIG_PM_SLEEP int acpi_pm_device_sleep_state(struct device *, int *); int acpi_pm_device_sleep_wake(struct device *, bool); #else /* !CONFIG_PM_SLEEP */ static inline int acpi_pm_device_sleep_state(struct device *d, int *p) { if (p) *p = ACPI_STATE_D0; return ACPI_STATE_D3; } static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { return -ENODEV; } #endif /* !CONFIG_PM_SLEEP */ #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/actypes.h000066400000000000000000001104331314037446600301220ustar00rootroot00000000000000/****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /* * Copyright (C) 2000 - 2008, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of * 12/2006. 
*/ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /*! [Begin] no source code translation */ /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no common * 64-bit integer type across the various compilation models, as shown in * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and are * required because there is no C type that consistently represents the native * data width. ACPI_SIZE is needed because there is no guarantee that a * kernel-level C library is present. * * ACPI_SIZE 16/32/64-bit unsigned value * ACPI_NATIVE_INT 16/32/64-bit signed value * */ /******************************************************************************* * * Common types for all compilers, all targets * ******************************************************************************/ typedef unsigned char BOOLEAN; typedef unsigned char UINT8; typedef unsigned short UINT16; typedef COMPILER_DEPENDENT_UINT64 UINT64; typedef COMPILER_DEPENDENT_INT64 INT64; /*! [End] no source code translation !*/ /******************************************************************************* * * Types specific to 64-bit targets * ******************************************************************************/ #if ACPI_MACHINE_WIDTH == 64 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s64 acpi_native_int; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT64_MAX #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag * to indicate that special precautions must be taken to avoid alignment faults. 
* (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: Em64_t and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. */ #if defined (__IA64__) || defined (__ia64__) #define ACPI_MISALIGNMENT_NOT_SUPPORTED #endif /******************************************************************************* * * Types specific to 32-bit targets * ******************************************************************************/ #elif ACPI_MACHINE_WIDTH == 32 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s32 acpi_native_int; typedef u32 acpi_size; typedef u32 acpi_io_address; typedef u32 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent and compiler-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the compiler-specific or OS-specific header, and this will * take precedence. * ******************************************************************************/ /* Value returned by acpi_os_get_thread_id */ #ifndef acpi_thread_id #define acpi_thread_id acpi_size #endif /* Object returned from acpi_os_create_lock */ #ifndef acpi_spinlock #define acpi_spinlock void * #endif /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ #ifndef acpi_cpu_flags #define acpi_cpu_flags acpi_size #endif /* Object returned from acpi_os_create_cache */ #ifndef acpi_cache_t #define acpi_cache_t struct acpi_memory_list #endif /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef acpi_uintptr_t #define acpi_uintptr_t void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us * to to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA functions that are available to the rest of the kernel are * tagged with this macro which can be defined as appropriate for the host. */ #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(symbol) #endif /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Mescellaneous types */ typedef u32 acpi_status; /* All ACPI Exceptions */ typedef u32 acpi_name; /* 4-byte ACPI name */ typedef char *acpi_string; /* Null terminated ASCII string */ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ struct uint64_struct { u32 lo; u32 hi; }; union uint64_overlay { u64 full; struct uint64_struct part; }; struct uint32_struct { u32 lo; u32 hi; }; /* Synchronization objects */ #define acpi_mutex void * #define acpi_semaphore void * /* * Acpi integer width. In ACPI version 1, integers are 32 bits. 
In ACPI * version 2, integers are 64 bits. Note that this pertains to the ACPI integer * type only, not other integers used in the implementation of the ACPI CA * subsystem. */ typedef unsigned long long acpi_integer; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #if ACPI_MACHINE_WIDTH == 64 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ #endif #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) /* * Initialization sequence */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 #define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 #define ACPI_NO_DEVICE_INIT 0x20 #define ACPI_NO_OBJECT_INIT 0x40 /* * Initialization state */ #define ACPI_SUBSYSTEM_INITIALIZE 0x01 #define ACPI_INITIALIZED_OK 0x02 /* * Power state values */ #define ACPI_STATE_UNKNOWN (u8) 0xFF #define ACPI_STATE_S0 (u8) 0 #define ACPI_STATE_S1 (u8) 1 #define ACPI_STATE_S2 (u8) 2 #define ACPI_STATE_S3 (u8) 3 #define ACPI_STATE_S4 (u8) 4 #define ACPI_STATE_S5 (u8) 5 #define ACPI_S_STATES_MAX ACPI_STATE_S5 #define ACPI_S_STATE_COUNT 6 #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 #define ACPI_STATE_D3 (u8) 3 #define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_D_STATE_COUNT 4 #define ACPI_STATE_C0 (u8) 0 #define ACPI_STATE_C1 (u8) 1 #define ACPI_STATE_C2 (u8) 2 #define ACPI_STATE_C3 (u8) 3 #define ACPI_C_STATES_MAX ACPI_STATE_C3 #define ACPI_C_STATE_COUNT 4 /* * Sleep type invalid value */ #define ACPI_SLEEP_TYPE_MAX 0x7 #define ACPI_SLEEP_TYPE_INVALID 0xFF /* * Standard notify values */ #define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 #define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 #define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 #define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 #define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 #define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 #define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 #define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 #define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 #define ACPI_NOTIFY_RESERVED (u8) 0x0A #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_MAX 0x0B /* * Types associated with ACPI names and objects. The first group of * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition * of the ACPI object_type() operator (See the ACPI Spec). Therefore, * only add to the first group if the spec changes. * * NOTE: Types must be kept in sync with the global acpi_ns_properties * and acpi_ns_type_names arrays. 
*/ typedef u32 acpi_object_type; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 /* * These are object types that do not map directly to the ACPI * object_type() operator. They are used for various internal purposes only. * If new predefined ACPI_TYPEs are added (via the ACPI specification), these * internal types must move upwards. (There is code that depends on these * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ /* * These are special object types that never appear in * a Namespace node, only in an union acpi_operand_object */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef u32 acpi_event_type; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). * +-------------+-+-+-+ * | Bits 31:3 |2|1|0| * +-------------+-+-+-+ * | | | | * | | | +- Enabled? * | | +--- Enabled for wake? * | +----- Set? 
* +----------- */ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 /* * General Purpose Events (GPE) */ #define ACPI_GPE_INVALID 0xFF #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 /* * GPE info flags - Per GPE * +-+-+-+---+---+-+ * |7|6|5|4:3|2:1|0| * +-+-+-+---+---+-+ * | | | | | | * | | | | | +--- Interrupt type: Edge or Level Triggered * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime * | | | +--- Type of dispatch -- to method, handler, or none * | | +--- Enabled for runtime? * | +--- Enabled for wake? * +--- Unused */ #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 #define ACPI_GPE_TYPE_MASK (u8) 0x06 #define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 #define ACPI_GPE_TYPE_WAKE (u8) 0x02 #define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ #define ACPI_GPE_DISPATCH_MASK (u8) 0x18 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ #define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 #define ACPI_GPE_RUN_ENABLED (u8) 0x20 #define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 #define ACPI_GPE_WAKE_ENABLED (u8) 0x40 #define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ /* * Flags for GPE and Lock interfaces */ #define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ #define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_MAX_SYS_NOTIFY 0x7f /* Address Space (Operation Region) Types */ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 #define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 7 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* * bit_register IDs * These are bitfields defined within the full ACPI registers */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_WAKE_ENABLE 0x0D #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0E #define ACPI_BITREG_SCI_ENABLE 0x0F #define ACPI_BITREG_BUS_MASTER_RLD 0x10 #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x11 #define ACPI_BITREG_SLEEP_TYPE_A 0x12 #define ACPI_BITREG_SLEEP_TYPE_B 
0x13 #define ACPI_BITREG_SLEEP_ENABLE 0x14 #define ACPI_BITREG_ARB_DISABLE 0x15 #define ACPI_BITREG_MAX 0x15 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ struct { acpi_object_type type; /* ACPI_TYPE_INTEGER */ acpi_integer value; /* The actual number */ } integer; struct { acpi_object_type type; /* ACPI_TYPE_STRING */ u32 length; /* # of bytes in string, excluding trailing null */ char *pointer; /* points to the string value */ } string; struct { acpi_object_type type; /* ACPI_TYPE_BUFFER */ u32 length; /* # of bytes in buffer */ u8 *pointer; /* points to the buffer */ } buffer; struct { acpi_object_type type; /* ACPI_TYPE_PACKAGE */ u32 count; /* # of elements in package */ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ } package; struct { acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ acpi_object_type actual_type; /* Type associated with the Handle */ acpi_handle handle; /* object reference */ } reference; struct { acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; /* ACPI_TYPE_POWER */ u32 system_level; u32 resource_order; } power_resource; }; /* * List of objects, used as a parameter list for control method evaluation */ struct acpi_object_list { u32 count; union acpi_object *pointer; }; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) #define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) struct acpi_buffer { acpi_size length; /* Length in bytes of the buffer */ void *pointer; /* pointer to buffer */ }; /* * name_type for acpi_get_name */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_NAME_TYPE_MAX 1 /* * Structure and flags for acpi_get_system_info */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by acpi_get_system_info() */ struct acpi_system_info { u32 acpi_ca_version; u32 flags; u32 timer_resolution; u32 reserved1; u32 reserved2; u32 debug_level; u32 debug_layer; }; /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_NUM_TABLE_EVENTS 2 /* * Types specific to the OS service interfaces */ typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); /* * Various handlers and callback procedures */ typedef u32(*acpi_event_handler) (void *context); typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef void (*acpi_object_handler) (acpi_handle object, u32 function, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); #define ACPI_INIT_DEVICE_INI 1 typedef acpi_status(*acpi_exception_handler) (acpi_status aml_status, acpi_name name, u16 opcode, u32 aml_offset, void *context); /* Table Event handler (Load, load_table etc) and types */ typedef acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); /* Address Spaces (For Operation Regions) */ typedef acpi_status(*acpi_adr_space_handler) (u32 function, acpi_physical_address address, u32 bit_width, acpi_integer * 
value, void *handler_context, void *region_context); #define ACPI_DEFAULT_HANDLER NULL typedef acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, u32 function, void *handler_context, void **region_context); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* Common string version of device HIDs and UIDs */ struct acpica_device_id { char value[ACPI_DEVICE_ID_LENGTH]; }; /* Common string version of device CIDs */ struct acpi_compatible_id { char value[ACPI_MAX_CID_LENGTH]; }; struct acpi_compatible_id_list { u32 count; u32 size; struct acpi_compatible_id id[1]; }; /* Structure and flags for acpi_get_object_info */ #define ACPI_VALID_STA 0x0001 #define ACPI_VALID_ADR 0x0002 #define ACPI_VALID_HID 0x0004 #define ACPI_VALID_UID 0x0008 #define ACPI_VALID_CID 0x0010 #define ACPI_VALID_SXDS 0x0020 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 #define ACPI_COMMON_OBJ_INFO \ acpi_object_type type; /* ACPI object type */ \ acpi_name name /* ACPI object Name */ struct acpi_obj_info_header { ACPI_COMMON_OBJ_INFO; }; /* Structure returned from Get Object Info */ struct acpi_device_info { ACPI_COMMON_OBJ_INFO; u32 valid; /* Indicates which fields below are valid */ u32 current_status; /* _STA value */ acpi_integer address; /* _ADR value if any */ struct acpica_device_id hardware_id; /* _HID value if any */ struct acpica_device_id unique_id; /* _UID value if any */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */ }; /* Context structs for address space handlers */ struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; acpi_physical_address mapped_physical_address; u8 *mapped_logical_address; acpi_size mapped_length; }; /* * Definitions for Resource Attributes */ typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */ /* * Memory Attributes */ #define ACPI_READ_ONLY_MEMORY (u8) 0x00 #define ACPI_READ_WRITE_MEMORY (u8) 0x01 #define ACPI_NON_CACHEABLE_MEMORY (u8) 0x00 #define ACPI_CACHABLE_MEMORY (u8) 0x01 #define ACPI_WRITE_COMBINING_MEMORY (u8) 0x02 #define ACPI_PREFETCHABLE_MEMORY (u8) 0x03 /* * IO Attributes * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh. * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh. 
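 * ACPI_ENTIRE_RANGE below is simply the OR of the two range flags, so a
 * resource descriptor may be marked as covering ISA-only ranges, non-ISA-only
 * ranges, or (with both bits set) the entire IO space.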
*/ #define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01 #define ACPI_ISA_ONLY_RANGES (u8) 0x02 #define ACPI_ENTIRE_RANGE (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES) /* Type of translation - 1=Sparse, 0=Dense */ #define ACPI_SPARSE_TRANSLATION (u8) 0x01 /* * IO Port Descriptor Decode */ #define ACPI_DECODE_10 (u8) 0x00 /* 10-bit IO address decode */ #define ACPI_DECODE_16 (u8) 0x01 /* 16-bit IO address decode */ /* * IRQ Attributes */ #define ACPI_LEVEL_SENSITIVE (u8) 0x00 #define ACPI_EDGE_SENSITIVE (u8) 0x01 #define ACPI_ACTIVE_HIGH (u8) 0x00 #define ACPI_ACTIVE_LOW (u8) 0x01 #define ACPI_EXCLUSIVE (u8) 0x00 #define ACPI_SHARED (u8) 0x01 /* * DMA Attributes */ #define ACPI_COMPATIBILITY (u8) 0x00 #define ACPI_TYPE_A (u8) 0x01 #define ACPI_TYPE_B (u8) 0x02 #define ACPI_TYPE_F (u8) 0x03 #define ACPI_NOT_BUS_MASTER (u8) 0x00 #define ACPI_BUS_MASTER (u8) 0x01 #define ACPI_TRANSFER_8 (u8) 0x00 #define ACPI_TRANSFER_8_16 (u8) 0x01 #define ACPI_TRANSFER_16 (u8) 0x02 /* * Start Dependent Functions Priority definitions */ #define ACPI_GOOD_CONFIGURATION (u8) 0x00 #define ACPI_ACCEPTABLE_CONFIGURATION (u8) 0x01 #define ACPI_SUB_OPTIMAL_CONFIGURATION (u8) 0x02 /* * 16, 32 and 64-bit Address Descriptor resource types */ #define ACPI_MEMORY_RANGE (u8) 0x00 #define ACPI_IO_RANGE (u8) 0x01 #define ACPI_BUS_NUMBER_RANGE (u8) 0x02 #define ACPI_ADDRESS_NOT_FIXED (u8) 0x00 #define ACPI_ADDRESS_FIXED (u8) 0x01 #define ACPI_POS_DECODE (u8) 0x00 #define ACPI_SUB_DECODE (u8) 0x01 #define ACPI_PRODUCER (u8) 0x00 #define ACPI_CONSUMER (u8) 0x01 /* * If possible, pack the following structures to byte alignment */ #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #pragma pack(1) #endif /* UUID data structures for use in vendor-defined resource descriptors */ struct acpi_uuid { u8 data[ACPI_UUID_LENGTH]; }; struct acpi_vendor_uuid { u8 subtype; u8 data[ACPI_UUID_LENGTH]; }; /* * Structures used to describe device resources */ struct acpi_resource_irq { u8 descriptor_length; u8 triggering; u8 polarity; u8 sharable; u8 interrupt_count; u8 interrupts[1]; }; struct acpi_resource_dma { u8 type; u8 bus_master; u8 transfer; u8 channel_count; u8 channels[1]; }; struct acpi_resource_start_dependent { u8 descriptor_length; u8 compatibility_priority; u8 performance_robustness; }; /* * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not * needed because it has no fields */ struct acpi_resource_io { u8 io_decode; u8 alignment; u8 address_length; u16 minimum; u16 maximum; }; struct acpi_resource_fixed_io { u16 address; u8 address_length; }; struct acpi_resource_vendor { u16 byte_length; u8 byte_data[1]; }; /* Vendor resource with UUID info (introduced in ACPI 3.0) */ struct acpi_resource_vendor_typed { u16 byte_length; u8 uuid_subtype; u8 uuid[ACPI_UUID_LENGTH]; u8 byte_data[1]; }; struct acpi_resource_end_tag { u8 checksum; }; struct acpi_resource_memory24 { u8 write_protect; u16 minimum; u16 maximum; u16 alignment; u16 address_length; }; struct acpi_resource_memory32 { u8 write_protect; u32 minimum; u32 maximum; u32 alignment; u32 address_length; }; struct acpi_resource_fixed_memory32 { u8 write_protect; u32 address; u32 address_length; }; struct acpi_memory_attribute { u8 write_protect; u8 caching; u8 range_type; u8 translation; }; struct acpi_io_attribute { u8 range_type; u8 translation; u8 translation_type; u8 reserved1; }; union acpi_resource_attribute { struct acpi_memory_attribute mem; struct acpi_io_attribute io; /* Used for the *word_space macros */ u8 type_specific; }; struct acpi_resource_source { u8 index; u16 string_length; char 
*string_ptr; }; /* Fields common to all address descriptors, 16/32/64 bit */ #define ACPI_RESOURCE_ADDRESS_COMMON \ u8 resource_type; \ u8 producer_consumer; \ u8 decode; \ u8 min_address_fixed; \ u8 max_address_fixed; \ union acpi_resource_attribute info; struct acpi_resource_address { ACPI_RESOURCE_ADDRESS_COMMON}; struct acpi_resource_address16 { ACPI_RESOURCE_ADDRESS_COMMON u16 granularity; u16 minimum; u16 maximum; u16 translation_offset; u16 address_length; struct acpi_resource_source resource_source; }; struct acpi_resource_address32 { ACPI_RESOURCE_ADDRESS_COMMON u32 granularity; u32 minimum; u32 maximum; u32 translation_offset; u32 address_length; struct acpi_resource_source resource_source; }; struct acpi_resource_address64 { ACPI_RESOURCE_ADDRESS_COMMON u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; struct acpi_resource_source resource_source; }; struct acpi_resource_extended_address64 { ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD; u64 granularity; u64 minimum; u64 maximum; u64 translation_offset; u64 address_length; u64 type_specific; }; struct acpi_resource_extended_irq { u8 producer_consumer; u8 triggering; u8 polarity; u8 sharable; u8 interrupt_count; struct acpi_resource_source resource_source; u32 interrupts[1]; }; struct acpi_resource_generic_register { u8 space_id; u8 bit_width; u8 bit_offset; u8 access_size; u64 address; }; /* ACPI_RESOURCE_TYPEs */ #define ACPI_RESOURCE_TYPE_IRQ 0 #define ACPI_RESOURCE_TYPE_DMA 1 #define ACPI_RESOURCE_TYPE_START_DEPENDENT 2 #define ACPI_RESOURCE_TYPE_END_DEPENDENT 3 #define ACPI_RESOURCE_TYPE_IO 4 #define ACPI_RESOURCE_TYPE_FIXED_IO 5 #define ACPI_RESOURCE_TYPE_VENDOR 6 #define ACPI_RESOURCE_TYPE_END_TAG 7 #define ACPI_RESOURCE_TYPE_MEMORY24 8 #define ACPI_RESOURCE_TYPE_MEMORY32 9 #define ACPI_RESOURCE_TYPE_FIXED_MEMORY32 10 #define ACPI_RESOURCE_TYPE_ADDRESS16 11 #define ACPI_RESOURCE_TYPE_ADDRESS32 12 #define ACPI_RESOURCE_TYPE_ADDRESS64 13 #define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 14 /* ACPI 3.0 */ #define ACPI_RESOURCE_TYPE_EXTENDED_IRQ 15 #define ACPI_RESOURCE_TYPE_GENERIC_REGISTER 16 #define ACPI_RESOURCE_TYPE_MAX 16 union acpi_resource_data { struct acpi_resource_irq irq; struct acpi_resource_dma dma; struct acpi_resource_start_dependent start_dpf; struct acpi_resource_io io; struct acpi_resource_fixed_io fixed_io; struct acpi_resource_vendor vendor; struct acpi_resource_vendor_typed vendor_typed; struct acpi_resource_end_tag end_tag; struct acpi_resource_memory24 memory24; struct acpi_resource_memory32 memory32; struct acpi_resource_fixed_memory32 fixed_memory32; struct acpi_resource_address16 address16; struct acpi_resource_address32 address32; struct acpi_resource_address64 address64; struct acpi_resource_extended_address64 ext_address64; struct acpi_resource_extended_irq extended_irq; struct acpi_resource_generic_register generic_reg; /* Common fields */ struct acpi_resource_address address; /* Common 16/32/64 address fields */ }; struct acpi_resource { u32 type; u32 length; union acpi_resource_data data; }; /* restore default alignment */ #pragma pack() #define ACPI_RS_SIZE_NO_DATA 8 /* Id + Length fields */ #define ACPI_RS_SIZE_MIN (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12) #define ACPI_RS_SIZE(type) (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type)) #define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length) struct acpi_pci_routing_table { u32 length; u32 pin; acpi_integer address; /* here for 64-bit alignment */ u32 source_index; char source[4]; /* pad to 64 bits 
so sizeof() works in all cases */ }; #endif /* __ACTYPES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/netfront.c000066400000000000000000001630431314037446600303110ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_bus.h" #include "actypes.h" struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. 
*/ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ static int rtl8139_acpi_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { printk(KERN_INFO "acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) printk(KERN_INFO "cannot remove from acpi list\n"); return retval; } static void remove_rtl8139(void) { struct pci_dev *pdev = NULL; acpi_handle phandle = NULL; pdev = pci_get_device(0x10ec, 0x8139, pdev); if (pdev) { phandle = DEVICE_ACPI_HANDLE(&pdev->dev); printk(KERN_INFO "remove_rtl8139 : rtl8139 of subsystem(0x%2x,0x%2x) removed.\n", pdev->subsystem_vendor, pdev->subsystem_device); pci_disable_device(pdev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pdev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pdev); pci_remove_bus_device(pdev); #else pci_stop_and_remove_bus_device(pdev); #endif /* end check kernel version */ pci_dev_put(pdev); } if (phandle) { rtl8139_acpi_bus_trim(phandle); } return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
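 *
 * In outline (a simplified sketch of the code below; error handling and
 * cleanup paths omitted) the probe sequence is:
 *
 *   remove_rtl8139();                               emulated NIC out of the way
 *   netdev = create_netdev(dev);                    allocate net_device and ring state
 *   xen_net_read_mac(dev, info->netdev->dev_addr);  MAC from the xenstore "mac" node
 *   register_netdev(info->netdev);                  make the interface visible
 *   xennet_sysfs_addif(info->netdev);               rxbuf_{min,max,cur} attributes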
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove rtl8139 when xen nic devices appear */ remove_rtl8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev->dev.driver_data = info; err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev->dev.driver_data = NULL; return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev->dev.driver_data; DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
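 * After setup_device() has granted the tx/rx ring pages and bound the event
 * channel, the transaction below advertises them to the backend.  As a rough
 * illustration (the numeric values shown are examples only), the frontend's
 * xenstore node ends up containing:
 *
 *   tx-ring-ref             = "768"
 *   rx-ring-ref             = "769"
 *   event-channel           = "17"
 *   request-rx-copy         = "0" or "1"   (copying vs. flipping receiver)
 *   feature-rx-notify       = "1"
 *   feature-no-csum-offload = "0"          (!HAVE_CSUM_OFFLOAD)
 *   feature-sg              = "1"
 *   feature-gso-tcpv4       = "1"          (HAVE_TSO)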
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
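 *
 * In outline (see the switch below): once the backend reaches InitWait while
 * we are still in XenbusStateInitialising, network_connect() brings up the
 * rings and the frontend switches itself to Connected; on XenbusStateConnected
 * a gratuitous ARP request and reply are sent so that switches relearn our
 * MAC; XenbusStateClosing is handed to xenbus_frontend_closed().  The
 * remaining states need no action here.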
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev->dev.driver_data; struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev, &np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev, &np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
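 * The heuristic below doubles rx_target whenever fewer than a quarter of the
 * current target is still outstanding on the ring, clamping the result at
 * rx_max_target.  For example, starting from a target of 64 the sequence
 * under sustained pressure would be 64 -> 128 -> 256, stopping once
 * rx_max_target is reached.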
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
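 * Once the new requests are visible, req_prod_pvt is advanced past the slots
 * just filled and RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes the new
 * producer index, setting 'notify' when the backend also needs an
 * event-channel kick via notify_remote_via_irq().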
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return 0; } frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return 0; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); netif_rx_schedule(dev, &np->napi); dev->last_rx = jiffies; } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) 
WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. 
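 * The resulting mapping is:
 *   NETRXF_data_validated or NETRXF_csum_blank set -> CHECKSUM_UNNECESSARY
 *   neither flag set                               -> CHECKSUM_NONE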
*/ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); /* Pass it up. */ netif_receive_skb(skb); dev->last_rx = jiffies; } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __netif_rx_complete(dev, napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = 
GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static struct ethtool_ops network_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, 
&xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->open = network_open; netdev->hard_start_xmit = network_start_xmit; netdev->stop = network_close; netdev->get_stats = network_get_stats; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->set_multicast_list = network_set_multicast_list; netdev->uninit = netif_uninit; netdev->set_mac_address = xennet_set_mac_address; netdev->change_mtu = xennet_change_mtu; netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif netif_exit_accel(); return xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.27/netfront.h000066400000000000000000000210111314037446600303020ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. 
This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/000077500000000000000000000000001314037446600262735ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/accel.c000066400000000000000000000533731314037446600275210ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &np->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/acpi_bus.h000066400000000000000000000261731314037446600302420ustar00rootroot00000000000000/* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ #include #include /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; acpi_handle handles[ACPI_MAX_HANDLES]; }; /* acpi_utils.h */ acpi_status acpi_extract_package(union acpi_object *package, struct acpi_buffer *format, struct acpi_buffer *buffer); acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); acpi_status acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); #ifdef CONFIG_ACPI #include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; enum acpi_bus_removal_type { ACPI_BUS_REMOVAL_NORMAL = 0, ACPI_BUS_REMOVAL_EJECT, ACPI_BUS_REMOVAL_SUPRISE, ACPI_BUS_REMOVAL_TYPE_COUNT }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT }; struct acpi_driver; struct acpi_device; /* * ACPI Driver * ----------- */ typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); struct acpi_bus_ops { u32 acpi_op_add:1; u32 acpi_op_start:1; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_notify notify; }; #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ struct acpi_driver { char name[80]; char class[80]; const struct acpi_device_id *ids; /* Supported Hardware IDs */ unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; }; /* * ACPI Device * ----------- */ /* Status (_STA) */ struct acpi_device_status { u32 present:1; u32 enabled:1; u32 show_in_ui:1; u32 functional:1; u32 battery_present:1; u32 reserved:27; }; /* Flags */ struct acpi_device_flags { u32 dynamic_status:1; u32 bus_address:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 wake_capable:1; /* Wakeup(_PRW) supported? 
*/ u32 force_power_state:1; u32 reserved:22; }; /* File System */ struct acpi_device_dir { struct proc_dir_entry *entry; }; #define acpi_device_dir(d) ((d)->dir.entry) /* Plug and Play */ typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_hardware_id { struct list_head list; char *id; }; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ struct list_head ids; /* _HID and _CIDs */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) char *acpi_device_hid(struct acpi_device *device); #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) /* Power Management */ struct acpi_device_power_flags { u32 explicit_get:1; /* _PSC present? */ u32 power_resources:1; /* Power resources */ u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 reserved:28; }; struct acpi_device_power_state { struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ u8 reserved:6; } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ struct acpi_handle_list resources; /* Power resources referenced */ }; struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ }; /* Performance Management */ struct acpi_device_perf_flags { u8 reserved:8; }; struct acpi_device_perf_state { struct { u8 valid:1; u8 reserved:7; } flags; u8 power; /* % Power (compared to P0) */ u8 performance; /* % Performance ( " ) */ int latency; /* Px->P0 time (microseconds) */ }; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ }; struct acpi_device_wakeup_state { u8 enabled:1; }; struct acpi_device_wakeup { acpi_handle gpe_device; acpi_integer gpe_number; acpi_integer sleep_state; struct acpi_handle_list resources; struct acpi_device_wakeup_state state; struct acpi_device_wakeup_flags flags; int prepare_count; }; /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_ops ops; struct acpi_driver *driver; void *driver_data; struct device dev; struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */ enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ }; static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; } #define to_acpi_device(d) container_of(d, struct acpi_device, dev) #define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; /* * Events * ------ */ struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); extern int register_acpi_bus_notifier(struct notifier_block *nb); extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); void acpi_bus_data_handler(acpi_handle handle, void *context); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_set_power(acpi_handle handle, int state); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_receive_event(struct acpi_bus_event *event); #else static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) { return 0; } #endif int acpi_bus_register_driver(struct acpi_driver *driver); void acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ 
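/*
 * Illustrative sketch only, not part of the original acpi_bus.h: a minimal,
 * hypothetical ACPI driver showing how struct acpi_driver and the
 * acpi_bus_register_driver()/acpi_bus_unregister_driver() interfaces declared
 * above are typically wired up. The driver name, class, the HID "XYZ0001",
 * and the callback bodies are assumptions made purely for illustration; the
 * block is guarded by #if 0 so it is never compiled.
 */
#if 0	/* example only, never compiled */
static int example_acpi_add(struct acpi_device *device)
{
	/* Called when a namespace device matching example_acpi_ids is found;
	 * driver-private state would be attached via device->driver_data. */
	return 0;
}

static int example_acpi_remove(struct acpi_device *device, int type)
{
	/* 'type' is one of enum acpi_bus_removal_type defined above. */
	return 0;
}

static const struct acpi_device_id example_acpi_ids[] = {
	{"XYZ0001", 0},	/* hypothetical hardware ID */
	{"", 0},
};

static struct acpi_driver example_acpi_driver = {
	.name  = "example_acpi",
	.class = "example",
	.ids   = example_acpi_ids,
	.ops   = {
		.add    = example_acpi_add,
		.remove = example_acpi_remove,
	},
};

/* In a real module these two would be wired up with module_init()/
 * module_exit(); registration hooks the driver into acpi_bus_type so
 * .ops.add() runs for every matching device. */
static int example_acpi_register(void)
{
	return acpi_bus_register_driver(&example_acpi_driver);
}

static void example_acpi_unregister(void)
{
	acpi_bus_unregister_driver(&example_acpi_driver);
}
#endif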
struct acpi_bus_type { struct list_head list; struct bus_type *bus; /* For general devices under the bus */ int (*find_device) (struct device *, acpi_handle *); /* For bridges, such as PCI root bridge, IDE controller */ int (*find_bridge) (struct device *, acpi_handle *); }; int register_acpi_bus_type(struct acpi_bus_type *); int unregister_acpi_bus_type(struct acpi_bus_type *); struct device *acpi_get_physical_device(acpi_handle); struct acpi_pci_root { struct list_head node; struct acpi_device * device; struct acpi_pci_id id; struct pci_bus *bus; u16 segment; u8 bus_nr; u32 osc_support_set; /* _OSC state of support bits */ u32 osc_control_set; /* _OSC state of control bits */ u32 osc_control_qry; /* the latest _OSC query result */ u32 osc_queried:1; /* has _OSC control been queried? */ }; /* helper */ acpi_handle acpi_get_child(acpi_handle, acpi_integer); int acpi_is_root_bridge(acpi_handle); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) #ifdef CONFIG_PM_SLEEP int acpi_pm_device_sleep_state(struct device *, int *); int acpi_pm_device_sleep_wake(struct device *, bool); #else /* !CONFIG_PM_SLEEP */ static inline int acpi_pm_device_sleep_state(struct device *d, int *p) { if (p) *p = ACPI_STATE_D0; return ACPI_STATE_D3; } static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { return -ENODEV; } #endif /* !CONFIG_PM_SLEEP */ #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/actypes.h000066400000000000000000001033731314037446600301230ustar00rootroot00000000000000/****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /* * Copyright (C) 2000 - 2008, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of * 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /*! [Begin] no source code translation */ /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no common * 64-bit integer type across the various compilation models, as shown in * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and are * required because there is no C type that consistently represents the native * data width. ACPI_SIZE is needed because there is no guarantee that a * kernel-level C library is present. * * ACPI_SIZE 16/32/64-bit unsigned value * ACPI_NATIVE_INT 16/32/64-bit signed value * */ /******************************************************************************* * * Common types for all compilers, all targets * ******************************************************************************/ typedef unsigned char BOOLEAN; typedef unsigned char UINT8; typedef unsigned short UINT16; typedef COMPILER_DEPENDENT_UINT64 UINT64; typedef COMPILER_DEPENDENT_INT64 INT64; /*! 
[End] no source code translation !*/ /******************************************************************************* * * Types specific to 64-bit targets * ******************************************************************************/ #if ACPI_MACHINE_WIDTH == 64 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s64 acpi_native_int; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT64_MAX #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag * to indicate that special precautions must be taken to avoid alignment faults. * (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: Em64_t and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. */ #if defined (__IA64__) || defined (__ia64__) #define ACPI_MISALIGNMENT_NOT_SUPPORTED #endif /******************************************************************************* * * Types specific to 32-bit targets * ******************************************************************************/ #elif ACPI_MACHINE_WIDTH == 32 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s32 acpi_native_int; typedef u32 acpi_size; typedef u32 acpi_io_address; typedef u32 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the OS-specific header, and this will take precedence. * ******************************************************************************/ /* Value returned by acpi_os_get_thread_id */ #ifndef acpi_thread_id #define acpi_thread_id acpi_size #endif /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ #ifndef acpi_cpu_flags #define acpi_cpu_flags acpi_size #endif /* Object returned from acpi_os_create_cache */ #ifndef acpi_cache_t #ifdef ACPI_USE_LOCAL_CACHE #define acpi_cache_t struct acpi_memory_list #else #define acpi_cache_t void * #endif #endif /* * Synchronization objects - Mutexes, Semaphores, and spin_locks */ #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) /* * These macros are used if the host OS does not support a mutex object. * Map the OSL Mutex interfaces to binary semaphores. 
*/ #define acpi_mutex acpi_semaphore #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) #endif /* Configurable types for synchronization objects */ #ifndef acpi_spinlock #define acpi_spinlock void * #endif #ifndef acpi_semaphore #define acpi_semaphore void * #endif #ifndef acpi_mutex #define acpi_mutex void * #endif /******************************************************************************* * * Compiler-dependent types * * If the defaults below are not appropriate for the host compiler, they can * be defined in the compiler-specific header, and this will take precedence. * ******************************************************************************/ /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef acpi_uintptr_t #define acpi_uintptr_t void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA functions that are available to the rest of the kernel are * tagged with this macro which can be defined as appropriate for the host. */ #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(symbol) #endif /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) * *****************************************************************************/ /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ #define ACPI_MAX_GPE_BLOCKS 2 /* Default ACPI register widths */ #define ACPI_GPE_REGISTER_WIDTH 8 #define ACPI_PM1_REGISTER_WIDTH 16 #define ACPI_PM2_REGISTER_WIDTH 8 #define ACPI_PM_TIMER_WIDTH 32 /* Names within the namespace are 4 bytes long */ #define ACPI_NAME_SIZE 4 #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
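/*
 * Illustrative example: a fully qualified namespace pathname such as
 * "\_SB_.PCI0.LNKA" is built from fixed 4-character name segments
 * ("_SB_", "PCI0", "LNKA") joined by ACPI_PATH_SEPARATOR, which is why
 * ACPI_PATH_SEGMENT_LENGTH is ACPI_NAME_SIZE + 1.
 */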
/* Sizes for ACPI table headers */ #define ACPI_OEM_ID_SIZE 6 #define ACPI_OEM_TABLE_ID_SIZE 8 /* ACPI/PNP hardware IDs */ #define PCI_ROOT_HID_STRING "PNP0A03" #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Miscellaneous types */ typedef u32 acpi_status; /* All ACPI Exceptions */ typedef u32 acpi_name; /* 4-byte ACPI name */ typedef char *acpi_string; /* Null terminated ASCII string */ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ /* Owner IDs are used to track namespace nodes for selective deletion */ typedef u8 acpi_owner_id; #define ACPI_OWNER_ID_MAX 0xFF struct uint64_struct { u32 lo; u32 hi; }; union uint64_overlay { u64 full; struct uint64_struct part; }; struct uint32_struct { u32 lo; u32 hi; }; /* * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI * version 2, integers are 64 bits. Note that this pertains to the ACPI integer * type only, not other integers used in the implementation of the ACPI CA * subsystem. */ typedef unsigned long long acpi_integer; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #if ACPI_MACHINE_WIDTH == 64 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ #endif #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /******************************************************************************* * * Commonly used macros * ******************************************************************************/ /* Data manipulation */ #define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) #define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) #define ACPI_LOWORD(integer) ((u16) (u32)(integer)) #define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) #define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) #define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) /* Size calculation */ #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) /* Pointer manipulation */ #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) 
NULL) #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) #else #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) #endif /******************************************************************************* * * Miscellaneous constants * ******************************************************************************/ /* * Initialization sequence */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 #define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 #define ACPI_NO_DEVICE_INIT 0x20 #define ACPI_NO_OBJECT_INIT 0x40 /* * Initialization state */ #define ACPI_SUBSYSTEM_INITIALIZE 0x01 #define ACPI_INITIALIZED_OK 0x02 /* * Power state values */ #define ACPI_STATE_UNKNOWN (u8) 0xFF #define ACPI_STATE_S0 (u8) 0 #define ACPI_STATE_S1 (u8) 1 #define ACPI_STATE_S2 (u8) 2 #define ACPI_STATE_S3 (u8) 3 #define ACPI_STATE_S4 (u8) 4 #define ACPI_STATE_S5 (u8) 5 #define ACPI_S_STATES_MAX ACPI_STATE_S5 #define ACPI_S_STATE_COUNT 6 #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 #define ACPI_STATE_D3 (u8) 3 #define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_D_STATE_COUNT 4 #define ACPI_STATE_C0 (u8) 0 #define ACPI_STATE_C1 (u8) 1 #define ACPI_STATE_C2 (u8) 2 #define ACPI_STATE_C3 (u8) 3 #define ACPI_C_STATES_MAX ACPI_STATE_C3 #define ACPI_C_STATE_COUNT 4 /* * Sleep type invalid value */ #define ACPI_SLEEP_TYPE_MAX 0x7 #define ACPI_SLEEP_TYPE_INVALID 0xFF /* * Standard notify values */ #define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 #define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 #define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 #define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 #define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 #define ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 #define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 #define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 #define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 #define ACPI_NOTIFY_RESERVED (u8) 0x0A #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_MAX 0x0B /* * Types associated with ACPI names and objects. The first group of * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition * of the ACPI object_type() operator (See the ACPI Spec). Therefore, * only add to the first group if the spec changes. * * NOTE: Types must be kept in sync with the global acpi_ns_properties * and acpi_ns_type_names arrays. 
*/ typedef u32 acpi_object_type; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 /* * These are object types that do not map directly to the ACPI * object_type() operator. They are used for various internal purposes only. * If new predefined ACPI_TYPEs are added (via the ACPI specification), these * internal types must move upwards. (There is code that depends on these * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ /* * These are special object types that never appear in * a Namespace node, only in a union acpi_operand_object */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef u32 acpi_event_type; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). * +-------------+-+-+-+ * | Bits 31:3 |2|1|0| * +-------------+-+-+-+ * | | | | * | | | +- Enabled? * | | +--- Enabled for wake? * | +----- Set? 
* +----------- */ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 /* * General Purpose Events (GPE) */ #define ACPI_GPE_INVALID 0xFF #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 /* * GPE info flags - Per GPE * +-+-+-+---+---+-+ * |7|6|5|4:3|2:1|0| * +-+-+-+---+---+-+ * | | | | | | * | | | | | +--- Interrupt type: Edge or Level Triggered * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime * | | | +--- Type of dispatch -- to method, handler, or none * | | +--- Enabled for runtime? * | +--- Enabled for wake? * +--- Unused */ #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 #define ACPI_GPE_TYPE_MASK (u8) 0x06 #define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 #define ACPI_GPE_TYPE_WAKE (u8) 0x02 #define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ #define ACPI_GPE_DISPATCH_MASK (u8) 0x18 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ #define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 #define ACPI_GPE_RUN_ENABLED (u8) 0x20 #define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 #define ACPI_GPE_WAKE_ENABLED (u8) 0x40 #define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ #define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ /* * Flags for GPE and Lock interfaces */ #define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */ #define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_MAX_SYS_NOTIFY 0x7f /* Address Space (Operation Region) Types */ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 #define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 #define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 /* * bit_register IDs * * These values are intended to be used by the hardware interfaces * and are mapped to individual bitfields defined within the ACPI * registers. See the acpi_gbl_bit_register_info global table in utglobal.c * for this mapping. 
*/ /* PM1 Status register */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 /* PM1 Enable register */ #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D /* PM1 Control register */ #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 #define ACPI_BITREG_SLEEP_TYPE 0x11 #define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ #define ACPI_BITREG_ARB_DISABLE 0x13 #define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* Status register values. A 1 clears a status bit. 0 = no effect */ #define ACPI_CLEAR_STATUS 1 /* Enable and Control register values */ #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ struct { acpi_object_type type; /* ACPI_TYPE_INTEGER */ acpi_integer value; /* The actual number */ } integer; struct { acpi_object_type type; /* ACPI_TYPE_STRING */ u32 length; /* # of bytes in string, excluding trailing null */ char *pointer; /* points to the string value */ } string; struct { acpi_object_type type; /* ACPI_TYPE_BUFFER */ u32 length; /* # of bytes in buffer */ u8 *pointer; /* points to the buffer */ } buffer; struct { acpi_object_type type; /* ACPI_TYPE_PACKAGE */ u32 count; /* # of elements in package */ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ } package; struct { acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ acpi_object_type actual_type; /* Type associated with the Handle */ acpi_handle handle; /* object reference */ } reference; struct { acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; /* ACPI_TYPE_POWER */ u32 system_level; u32 resource_order; } power_resource; }; /* * List of objects, used as a parameter list for control method evaluation */ struct acpi_object_list { u32 count; union acpi_object *pointer; }; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) #define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) struct acpi_buffer { acpi_size length; /* Length in bytes of the buffer */ void *pointer; /* pointer to buffer */ }; /* * name_type for acpi_get_name */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_NAME_TYPE_MAX 1 /* * Predefined Namespace items */ struct acpi_predefined_names { char *name; u8 type; char *val; }; /* * Structure and flags for acpi_get_system_info */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by acpi_get_system_info() */ struct acpi_system_info { u32 acpi_ca_version; u32 flags; u32 timer_resolution; u32 reserved1; u32 reserved2; u32 debug_level; 
u32 debug_layer; }; /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_NUM_TABLE_EVENTS 2 /* * Types specific to the OS service interfaces */ typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); /* * Various handlers and callback procedures */ typedef u32(*acpi_event_handler) (void *context); typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); #define ACPI_INIT_DEVICE_INI 1 typedef acpi_status(*acpi_exception_handler) (acpi_status aml_status, acpi_name name, u16 opcode, u32 aml_offset, void *context); /* Table Event handler (Load, load_table, etc.) and types */ typedef acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); /* Address Spaces (For Operation Regions) */ typedef acpi_status(*acpi_adr_space_handler) (u32 function, acpi_physical_address address, u32 bit_width, acpi_integer * value, void *handler_context, void *region_context); #define ACPI_DEFAULT_HANDLER NULL typedef acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, u32 function, void *handler_context, void **region_context); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef acpi_status(*acpi_walk_callback) (acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ /* Length of UUID (string) values */ #define ACPI_UUID_LENGTH 16 /* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { u32 length; /* Length of string + null */ char *string; }; struct acpica_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ struct acpica_device_id ids[1]; /* ID array */ }; /* * Structure returned from acpi_get_object_info. 
* Optimized for both 32- and 64-bit builds */ struct acpi_device_info { u32 info_size; /* Size of info, including ID strings */ u32 name; /* ACPI object Name */ acpi_object_type type; /* ACPI object Type */ u8 param_count; /* If a method, required parameter count */ u8 valid; /* Indicates which optional fields are valid */ u8 flags; /* Miscellaneous info */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ u32 current_status; /* _STA value */ acpi_integer address; /* _ADR value */ struct acpica_device_id hardware_id; /* _HID value */ struct acpica_device_id unique_id; /* _UID value */ struct acpica_device_id_list compatible_id_list; /* _CID list */ }; /* Values for Flags field above (acpi_get_object_info) */ #define ACPI_PCI_ROOT_BRIDGE 0x01 /* Flags for Valid field above (acpi_get_object_info) */ #define ACPI_VALID_STA 0x01 #define ACPI_VALID_ADR 0x02 #define ACPI_VALID_HID 0x04 #define ACPI_VALID_UID 0x08 #define ACPI_VALID_CID 0x10 #define ACPI_VALID_SXDS 0x20 #define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 /* Context structs for address space handlers */ struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; acpi_physical_address mapped_physical_address; u8 *mapped_logical_address; acpi_size mapped_length; }; /* * struct acpi_memory_list is used only if the ACPICA local cache is enabled */ struct acpi_memory_list { char *list_name; void *list_head; u16 object_size; u16 max_depth; u16 current_depth; u16 link_offset; #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Statistics for debug memory tracking only */ u32 total_allocated; u32 total_freed; u32 max_occupied; u32 total_size; u32 current_total_size; u32 requests; u32 hits; #endif }; #endif /* __ACTYPES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/netfront.c000066400000000000000000001726201314037446600303060ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_bus.h" #include "actypes.h" struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(dev, skb) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) \ printk(KERN_INFO "netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
\ printk(KERN_WARNING "netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ static int rtl8139_acpi_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { printk(KERN_INFO "acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) printk(KERN_INFO "cannot remove from acpi list\n"); return retval; } static void remove_rtl8139(void) { struct pci_dev *pdev = NULL; acpi_handle phandle = NULL; pdev = pci_get_device(0x10ec, 0x8139, pdev); if (pdev) { phandle = DEVICE_ACPI_HANDLE(&pdev->dev); printk(KERN_INFO "remove_rtl8139 : rtl8139 of subsystem(0x%2x,0x%2x) removed.\n", pdev->subsystem_vendor, pdev->subsystem_device); pci_disable_device(pdev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pdev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pdev); pci_remove_bus_device(pdev); #else pci_stop_and_remove_bus_device(pdev); #endif /* end check kernel version */ pci_dev_put(pdev); } if (phandle) { rtl8139_acpi_bus_trim(phandle); } return; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove rtl8139 when xen nic devices appear */ remove_rtl8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } /** * Description: * When NIC device has been actived successfully, operstate which is * member of netdev structure should be set to UP. */ np->netdev->operstate = (IF_OPER_DOWN != np->netdev->operstate) ? IF_OPER_UP : IF_OPER_DOWN; spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "network_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; #ifdef CONFIG_XEN if (skb->proto_data_valid) /* remote but checksummed? 
*/ tx->flags |= NETTXF_data_validated; #endif #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: np->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, 
size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int xen_skb_checksum_setup(struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); break; default: if (net_ratelimit()) printk(KERN_ERR "Attempting to checksum a non-" "TCP/UDP packet, dropping a protocol" " %d packet", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); np->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). 
This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; /* * Old backends do not assert data_validated but we * can infer it from csum_blank so test both flags. */ if (rx->flags & NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; #ifdef CONFIG_XEN skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE); skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank); #endif np->stats.rx_packets++; np->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb->ip_summed == CHECKSUM_PARTIAL) { if (xen_skb_checksum_setup(skb)) { kfree_skb(skb); np->stats.rx_errors++; continue; } } /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
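* Only the linear decrease is applied here: when more than three quarters of the posted requests are still unconsumed, rx_target is dropped by one (never below rx_min_target). The exponential growth side lives in network_alloc_rx_buffers(), which scales the target back up when the ring runs low.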
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
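* The per-page va-mapping updates queued above and this final mmu_update are submitted as a single multicall, so the kernel mappings and the machine-to-physical (M2P) table are fixed up in one hypercall batch.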
*/ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &np->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static void xennet_set_features(struct net_device *dev) { dev_disable_gso_features(dev); xennet_set_sg(dev, 0); /* We need checksum offload to enable scatter/gather and TSO. */ if (!(dev->features & NETIF_F_IP_CSUM)) return; if (xennet_set_sg(dev, 1)) return; /* Before 2.6.9 TSO seems to be unreliable so do not enable it * on older kernels. */ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)) xennet_set_tso(dev, 1); } /** * Description: * Get device-specific settings * Version: */ static int netfront_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netfront_info *adapter = netdev_priv(dev); struct netfront_hw *hw = &adapter->hw; u32 link_speed = 0; bool link_up = 1; ecmd->supported = SUPPORTED_1000baseT_Full; ecmd->transceiver = XCVR_EXTERNAL; ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; if (link_up) { ecmd->speed = (link_speed == NETFRONT_LINK_SPEED_10GB_FULL) ? 
SPEED_10000 : SPEED_1000; ecmd->duplex = DUPLEX_FULL; } else { ecmd->speed = -1; ecmd->duplex = -1; } return 0; } /** * Description: * Report whether Wake-on-Lan is enabled */ static void netfront_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netfront_info *adapter = netdev_priv(dev); wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & NETFRONT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & NETFRONT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & NETFRONT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & NETFRONT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; return; } /** * Description: * Report driver message level */ static u32 netfront_get_msglevel(struct net_device *dev) { struct netfront_info *adapter = netdev_priv(dev); adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; return adapter->msg_enable; } /* to support "ethtool -i" for getting driver information*/ static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strcpy(info->version, "1.0"); strcpy(info->bus_info, dev_name(dev->dev.parent)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; xennet_set_features(dev); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
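* The kick is the notify_remote_via_irq() call below; the carrier is raised first so the data path treats the device as connected before the backend starts consuming the requeued requests.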
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { /** * Description: * Added some information for ethtool. */ .get_settings = netfront_get_settings, .get_wol = netfront_get_wol, .get_msglevel = netfront_get_msglevel, .get_drvinfo = netfront_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) 
device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? 
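* If so, send a small burst of gratuitous ARP requests and replies so that switches and peer ARP caches relearn which port the VIF's MAC address is reachable through, for example after a live migration.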
*/ if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_flip = 1; /* Default is to flip. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); #ifdef CONFIG_INET (void)register_inetaddr_notifier(¬ifier_inetdev); #endif err = xenbus_register_frontend(&netfront_driver); if (err) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif } return err; } module_init(netif_init); static void __exit netif_exit(void) { #ifdef CONFIG_INET unregister_inetaddr_notifier(¬ifier_inetdev); #endif xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/2.6.32/netfront.h000066400000000000000000000244351314037446600303130ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. 
This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. 
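* Free slots are chained by storing small integer indices in the array itself; real skb pointers are always at or above PAGE_OFFSET, so any value below PAGE_OFFSET identifies a free entry (see add_id_to_freelist()/get_id_from_freelist() in netfront.c).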
*/ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; struct pci_dev *pdev; struct netfront_hw *hw; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; u16 msg_enable; u32 wol; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; #define NETFRONT_WUFC_MAG 0x00000002 #define NETFRONT_WUFC_EX 0x00000004 #define NETFRONT_WUFC_MC 0x00000008 #define NETFRONT_WUFC_BC 0x00000010 typedef u32 netfront_autoneg_advertised; typedef u32 netfront_link_speed; #define NETFRONT_LINK_SPEED_UNKNOWN 0 #define NETFRONT_LINK_SPEED_100_FULL 0x0008 #define NETFRONT_LINK_SPEED_1GB_FULL 0x0020 #define NETFRONT_LINK_SPEED_10GB_FULL 0x0080 #define NETFRONT_LINK_SPEED_82598_AUTONEG (NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_LINK_SPEED_82599_AUTONEG (NETFRONT_LINK_SPEED_100_FULL | \ NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_PCIE_DEV_CTRL_2 0xC8 #define PCIE_COMPL_TO_VALUE 0x05 struct netfront_mac_operations { s32 (*setup_link)(struct netfront_hw *, netfront_link_speed, bool, bool); s32 (*check_link)(struct netfront_hw *, netfront_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct netfront_hw *, netfront_link_speed *, bool *); }; struct netfront_mac_info { struct netfront_mac_operations ops; bool autotry_restart; }; enum netfront_media_type { netfront_media_type_unknown = 0, netfront_media_type_fiber, netfront_media_type_copper, netfront_media_type_backplane, netfront_media_type_cx4, netfront_media_type_virtual }; struct netfront_phy_info { enum netfront_media_type media_type; netfront_autoneg_advertised autoneg_advertised; bool multispeed_fiber; }; struct netfront_hw { u16 device_id; struct netfront_mac_info mac; struct netfront_phy_info phy; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.101/000077500000000000000000000000001314037446600263435ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.101/accel.c000066400000000000000000000531521314037446600275640ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. 
*/ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
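* It may sleep because the data path is quiesced first: napi_disable() (which can sleep) and netif_tx_lock_bh() ensure no receive or transmit code is running while the hooks pointer is swapped under the spinlock.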
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
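* Before each vif's hooks are cleared, get_stats() is called one last time so the accelerated counters are folded into the netdev statistics, and the plugin's remove() hook is invoked for that device.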
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.101/netfront.c000066400000000000000000001705341314037446600303600ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define NO_CSUM_OFFLOAD 0 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define NO_CSUM_OFFLOAD 1 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define NO_CSUM_OFFLOAD 1 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) 
\ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include static void remove_nic8139(void) { struct pci_dev *pci_dev_nic8139 = NULL; pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* end check kernel version */ } return; } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. */ static void __devinit netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) pr_warn("Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); static bool netfront_nic_unplugged(struct xenbus_device *dev) { bool ret = true; #ifndef CONFIG_XEN char *typestr; /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=nics|all * - native driver required with xen_emul_unplug=ide-disks|never */ /* Use pv driver if emulated hardware was unplugged */ if (xen_pvonhvm_unplugged_nics) return true; typestr = xenbus_read(XBT_NIL, dev->otherend, "type", NULL); /* Assume emulated+pv interface (ioemu+vif) when type property is missing. */ if (IS_ERR(typestr)) return false; /* If the interface is emulated and not unplugged, skip it. */ if (strcmp(typestr, "vif_ioemu") == 0 || strcmp(typestr, "ioemu") == 0) ret = false; kfree(typestr); #endif return ret; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove 8139 nic when xen nic devices appear */ remove_nic8139(); if (!netfront_nic_unplugged(dev)) { pr_warning("netfront: skipping emulated interface, native driver required\n"); return -ENODEV; } netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-rx-notify", "1"); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-no-csum-offload", __stringify(NO_CSUM_OFFLOAD)); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-sg", "1"); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv4", __stringify(HAVE_TSO)); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netif_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; //creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } np->netdev->operstate = (IF_OPER_DOWN != np->netdev->operstate) ? IF_OPER_UP : IF_OPER_DOWN; spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate enough skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if (nr_flips) { struct xen_memory_reservation reservation = { .nr_extents = nr_flips, .domid = DOMID_SELF, }; /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ MULTI_memory_op(np->rx_mcl + i, XENMEM_decrease_reservation, &reservation); /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn, flags; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } /* * If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. */ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { pr_alert("xennet: length %u too big for interface\n", skb->len); goto drop; } frags += PFN_UP(offset + len); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { pr_alert("xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? 
*/ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_is_gso(skb)) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) netdev_warn(np->netdev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) netdev_warn(np->netdev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = 
np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) netdev_warn(np->netdev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) netdev_warn(np->netdev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) netdev_warn(np->netdev, "Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) netdev_warn(np->netdev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) netdev_warn(np->netdev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) netdev_warn(skb->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) netdev_warn(skb->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) netdev_warn(skb->dev, "GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... 
*/ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { MULTI_mmu_update(np->rx_mcl + pages_flipped, np->rx_mmu, pages_flipped, 0, DOMID_SELF); err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { kfree_skb(skb); dev->stats.rx_errors++; --work_done; continue; } dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. 
*/ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, 0, DOMID_SELF); rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl + 1 - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &dev->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strlcpy(info->bus_info, dev_name(dev->dev.parent), ARRAY_SIZE(info->bus_info)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_drvinfo = netfront_get_drvinfo, .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ 
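/*
 * Illustrative sketch, not part of the driver build (hence the #if 0 guard):
 * the rxbuf_min/rxbuf_max/rxbuf_cur attributes registered above let an
 * administrator inspect and tune the receive-buffer fill target from user
 * space.  The snippet below shows one way to read the current target and
 * raise the minimum.  The sysfs location under /sys/class/net/<ifname>/ and
 * the interface name "eth0" are assumptions for illustration only; writing
 * rxbuf_min requires CAP_NET_ADMIN, and store_rxbuf_min() clamps the value
 * to the [RX_MIN_TARGET, RX_MAX_TARGET] range.
 */
#if 0
#include <stdio.h>

static long read_rxbuf_attr(const char *ifname, const char *attr)
{
	char path[128];
	long val = -1;
	FILE *f;

	/* Assumed sysfs layout: attribute files sit directly under the net device node. */
	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", ifname, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_rxbuf_attr(const char *ifname, const char *attr, long val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", ifname, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	printf("rxbuf_cur = %ld\n", read_rxbuf_attr("eth0", "rxbuf_cur"));
	/* Ask netfront to keep at least 128 receive buffers posted. */
	if (write_rxbuf_attr("eth0", "rxbuf_min", 128) != 0)
		perror("rxbuf_min");
	return 0;
}
#endif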
/* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static u32 xennet_fix_features(struct net_device *dev, u32 features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } return features; } static int xennet_set_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { netif_int(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. 
*/ netdev->features |= netdev->hw_features; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); #endif np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, ); static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { pr_warning("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = 1; /* Default is to copy. */ #endif netif_init_accel(); pr_info("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.101/netfront.h000066400000000000000000000224611314037446600303600ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. 
This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; u16 msg_enable; u32 wol; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; #define NETFRONT_WUFC_MAG 0x00000002 #define NETFRONT_WUFC_EX 0x00000004 #define NETFRONT_WUFC_MC 0x00000008 #define NETFRONT_WUFC_BC 0x00000010 #define NETFRONT_LINK_SPEED_UNKNOWN 0 #define NETFRONT_LINK_SPEED_100_FULL 0x0008 #define NETFRONT_LINK_SPEED_1GB_FULL 0x0020 #define NETFRONT_LINK_SPEED_10GB_FULL 0x0080 #define NETFRONT_LINK_SPEED_82598_AUTONEG (NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_LINK_SPEED_82599_AUTONEG (NETFRONT_LINK_SPEED_100_FULL | \ NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. 
* * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/000077500000000000000000000000001314037446600262655ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/accel.c000066400000000000000000000533711314037446600275110ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) pr_info("netfront/accel: " fmt, ##args) #define WPRINTK(fmt, args...) pr_warning("netfront/accel: " fmt, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * 
Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. 
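 *
 * In this file it is reached from netfront_load_accelerator(), which passes
 * hooks == NULL because the requested plugin has not registered yet, and
 * from netfront_accelerator_loaded(), which passes the plugin's hook table.
 *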
*/ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. 
*/ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. */ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. 
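 *
 * For each vif still attached this pulls a final set of statistics through
 * get_stats(), clears the vif's hooks under vif_states_lock (with NAPI and
 * the TX queue quiesced), and then calls the plugin's remove() hook. The
 * vif_state entries stay on the list so the vifs can reattach if the plugin
 * registers again.
 *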
*/ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. */ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. 
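 *
 * Suspend detaches the hooks via do_remove() but deliberately leaves the
 * vif_state on the accelerator's list, so that a later suspend_cancel can
 * simply re-fire the accel-frontend watch and reattach, while a real resume
 * (netfront_accelerator_resume) drops the vif from the list so it is probed
 * afresh when the device reconnects.
 *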
*/ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. 
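 *
 * The hooks pointer and np->accelerator are re-tested under vif_states_lock
 * because a concurrent accelerator_remove_hooks()/do_remove() can clear
 * them; the unlocked test only avoids taking the spinlock in the common
 * case where no accelerator is attached.
 *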
*/ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/acpi_bus.h000066400000000000000000000261211314037446600302250ustar00rootroot00000000000000/* * acpi_bus.h - ACPI Bus Driver ($Revision: 22 $) * * Copyright (C) 2001, 2002 Andy Grover * Copyright (C) 2001, 2002 Paul Diefenbaugh * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ #include #include /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 struct acpi_handle_list { u32 count; acpi_handle handles[ACPI_MAX_HANDLES]; }; /* acpi_utils.h */ acpi_status acpi_extract_package(union acpi_object *package, struct acpi_buffer *format, struct acpi_buffer *buffer); acpi_status acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); acpi_status acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, struct acpi_object_list *arguments, struct acpi_handle_list *list); #ifdef CONFIG_ACPI #include #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; enum acpi_bus_removal_type { ACPI_BUS_REMOVAL_NORMAL = 0, ACPI_BUS_REMOVAL_EJECT, ACPI_BUS_REMOVAL_SUPRISE, ACPI_BUS_REMOVAL_TYPE_COUNT }; enum acpi_bus_device_type { ACPI_BUS_TYPE_DEVICE = 0, ACPI_BUS_TYPE_POWER, ACPI_BUS_TYPE_PROCESSOR, ACPI_BUS_TYPE_THERMAL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_BUS_DEVICE_TYPE_COUNT }; struct acpi_driver; struct acpi_device; /* * ACPI Driver * ----------- */ typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state); typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); struct acpi_bus_ops { u32 acpi_op_add:1; u32 acpi_op_start:1; }; struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; acpi_op_suspend suspend; acpi_op_resume resume; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_notify notify; }; #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */ struct acpi_driver { char name[80]; char class[80]; 
const struct acpi_device_id *ids; /* Supported Hardware IDs */ unsigned int flags; struct acpi_device_ops ops; struct device_driver drv; struct module *owner; }; /* * ACPI Device * ----------- */ /* Status (_STA) */ struct acpi_device_status { u32 present:1; u32 enabled:1; u32 show_in_ui:1; u32 functional:1; u32 battery_present:1; u32 reserved:27; }; /* Flags */ struct acpi_device_flags { u32 dynamic_status:1; u32 bus_address:1; u32 removable:1; u32 ejectable:1; u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 reserved:24; }; /* File System */ struct acpi_device_dir { struct proc_dir_entry *entry; }; #define acpi_device_dir(d) ((d)->dir.entry) /* Plug and Play */ typedef char acpi_bus_id[8]; typedef unsigned long acpi_bus_address; typedef char acpi_device_name[40]; typedef char acpi_device_class[20]; struct acpi_hardware_id { struct list_head list; char *id; }; struct acpi_device_pnp { acpi_bus_id bus_id; /* Object name */ acpi_bus_address bus_address; /* _ADR */ char *unique_id; /* _UID */ struct list_head ids; /* _HID and _CIDs */ acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) #define acpi_device_adr(d) ((d)->pnp.bus_address) const char *acpi_device_hid(struct acpi_device *device); #define acpi_device_name(d) ((d)->pnp.device_name) #define acpi_device_class(d) ((d)->pnp.device_class) /* Power Management */ struct acpi_device_power_flags { u32 explicit_get:1; /* _PSC present? */ u32 power_resources:1; /* Power resources */ u32 inrush_current:1; /* Serialize Dx->D0 */ u32 power_removed:1; /* Optimize Dx->D0 */ u32 reserved:28; }; struct acpi_device_power_state { struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? */ u8 reserved:6; } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ struct acpi_handle_list resources; /* Power resources referenced */ }; struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ }; /* Performance Management */ struct acpi_device_perf_flags { u8 reserved:8; }; struct acpi_device_perf_state { struct { u8 valid:1; u8 reserved:7; } flags; u8 power; /* % Power (compared to P0) */ u8 performance; /* % Performance ( " ) */ int latency; /* Px->P0 time (microseconds) */ }; struct acpi_device_perf { int state; struct acpi_device_perf_flags flags; int state_count; struct acpi_device_perf_state *states; }; /* Wakeup Management */ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? 
*/ u8 run_wake:1; /* Run-Wake GPE devices */ u8 notifier_present:1; /* Wake-up notify handler has been installed */ }; struct acpi_device_wakeup { acpi_handle gpe_device; u64 gpe_number; u64 sleep_state; struct acpi_handle_list resources; struct acpi_device_wakeup_flags flags; int prepare_count; }; /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ struct acpi_device *parent; struct list_head children; struct list_head node; struct list_head wakeup_list; struct acpi_device_status status; struct acpi_device_flags flags; struct acpi_device_pnp pnp; struct acpi_device_power power; struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; struct acpi_device_ops ops; struct acpi_driver *driver; void *driver_data; struct device dev; struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */ enum acpi_bus_removal_type removal_type; /* indicate for different removal type */ }; static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; } #define to_acpi_device(d) container_of(d, struct acpi_device, dev) #define to_acpi_driver(d) container_of(d, struct acpi_driver, drv) /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; /* * Events * ------ */ struct acpi_bus_event { struct list_head node; acpi_device_class device_class; acpi_bus_id bus_id; u32 type; u32 data; }; extern struct kobject *acpi_kobj; extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); void acpi_bus_private_data_handler(acpi_handle, void *); int acpi_bus_get_private_data(acpi_handle, void **); extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); extern int register_acpi_notifier(struct notifier_block *); extern int unregister_acpi_notifier(struct notifier_block *); extern int register_acpi_bus_notifier(struct notifier_block *nb); extern void unregister_acpi_bus_notifier(struct notifier_block *nb); /* * External Functions */ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); void acpi_bus_data_handler(acpi_handle handle, void *context); acpi_status acpi_bus_get_status_handle(acpi_handle handle, unsigned long long *sta); int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_set_power(acpi_handle handle, int state); int acpi_bus_update_power(acpi_handle handle, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); bool acpi_bus_can_wakeup(acpi_handle handle); #ifdef CONFIG_ACPI_PROC_EVENT int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_receive_event(struct acpi_bus_event *event); #else static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data) { return 0; } #endif int acpi_bus_register_driver(struct acpi_driver *driver); void acpi_bus_unregister_driver(struct acpi_driver *driver); int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent, acpi_handle handle, int type); int acpi_bus_trim(struct acpi_device *start, int rmdevice); int acpi_bus_start(struct acpi_device *device); acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids); int acpi_create_dir(struct acpi_device *); void acpi_remove_dir(struct acpi_device *); /* * Bind physical devices with ACPI devices */ struct acpi_bus_type { struct 
list_head list; struct bus_type *bus; /* For general devices under the bus */ int (*find_device) (struct device *, acpi_handle *); /* For bridges, such as PCI root bridge, IDE controller */ int (*find_bridge) (struct device *, acpi_handle *); }; int register_acpi_bus_type(struct acpi_bus_type *); int unregister_acpi_bus_type(struct acpi_bus_type *); struct device *acpi_get_physical_device(acpi_handle); struct acpi_pci_root { struct list_head node; struct acpi_device * device; struct acpi_pci_id id; struct pci_bus *bus; u16 segment; struct resource secondary; /* downstream bus range */ u32 osc_support_set; /* _OSC state of support bits */ u32 osc_control_set; /* _OSC state of control bits */ }; /* helper */ acpi_handle acpi_get_child(acpi_handle, u64); int acpi_is_root_bridge(acpi_handle); acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); #ifdef CONFIG_PM int acpi_pm_device_sleep_state(struct device *, int *); #else static inline int acpi_pm_device_sleep_state(struct device *d, int *p) { if (p) *p = ACPI_STATE_D0; return ACPI_STATE_D3; } #endif #ifdef CONFIG_PM_SLEEP int acpi_pm_device_sleep_wake(struct device *, bool); #else static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) { return -ENODEV; } #endif #endif /* CONFIG_ACPI */ #endif /*__ACPI_BUS_H__*/ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/actypes.h000066400000000000000000001050071314037446600301110ustar00rootroot00000000000000/****************************************************************************** * * Name: actypes.h - Common data types for the entire ACPI subsystem * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #ifndef __ACTYPES_H__ #define __ACTYPES_H__ /* acpisrc:struct_defs -- for acpisrc conversion */ /* * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of * 12/2006. */ #ifndef ACPI_MACHINE_WIDTH #error ACPI_MACHINE_WIDTH not defined #endif /*! [Begin] no source code translation */ /* * Data type ranges * Note: These macros are designed to be compiler independent as well as * working around problems that some 32-bit compilers have with 64-bit * constants. */ #define ACPI_UINT8_MAX (UINT8) (~((UINT8) 0)) /* 0xFF */ #define ACPI_UINT16_MAX (UINT16)(~((UINT16) 0)) /* 0xFFFF */ #define ACPI_UINT32_MAX (UINT32)(~((UINT32) 0)) /* 0xFFFFFFFF */ #define ACPI_UINT64_MAX (UINT64)(~((UINT64) 0)) /* 0xFFFFFFFFFFFFFFFF */ #define ACPI_ASCII_MAX 0x7F /* * Architecture-specific ACPICA Subsystem Data Types * * The goal of these types is to provide source code portability across * 16-bit, 32-bit, and 64-bit targets. * * 1) The following types are of fixed size for all targets (16/32/64): * * BOOLEAN Logical boolean * * UINT8 8-bit (1 byte) unsigned value * UINT16 16-bit (2 byte) unsigned value * UINT32 32-bit (4 byte) unsigned value * UINT64 64-bit (8 byte) unsigned value * * INT16 16-bit (2 byte) signed value * INT32 32-bit (4 byte) signed value * INT64 64-bit (8 byte) signed value * * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the * compiler-dependent header(s) and were introduced because there is no common * 64-bit integer type across the various compilation models, as shown in * the table below. * * Datatype LP64 ILP64 LLP64 ILP32 LP32 16bit * char 8 8 8 8 8 8 * short 16 16 16 16 16 16 * _int32 32 * int 32 64 32 32 16 16 * long 64 64 32 32 32 32 * long long 64 64 * pointer 64 64 64 32 32 32 * * Note: ILP64 and LP32 are currently not supported. * * * 2) These types represent the native word size of the target mode of the * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are * usually used for memory allocation, efficient loop counters, and array * indexes. The types are similar to the size_t type in the C library and are * required because there is no C type that consistently represents the native * data width. ACPI_SIZE is needed because there is no guarantee that a * kernel-level C library is present. * * ACPI_SIZE 16/32/64-bit unsigned value * ACPI_NATIVE_INT 16/32/64-bit signed value */ /******************************************************************************* * * Common types for all compilers, all targets * ******************************************************************************/ typedef unsigned char BOOLEAN; typedef unsigned char UINT8; typedef unsigned short UINT16; typedef COMPILER_DEPENDENT_UINT64 UINT64; typedef COMPILER_DEPENDENT_INT64 INT64; /*! [End] no source code translation !*/ /* * Value returned by acpi_os_get_thread_id. There is no standard "thread_id" * across operating systems or even the various UNIX systems. 
Since ACPICA * only needs the thread ID as a unique thread identifier, we use a u64 * as the only common data type - it will accommodate any type of pointer or * any type of integer. It is up to the host-dependent OSL to cast the * native thread ID type to a u64 (in acpi_os_get_thread_id). */ #define acpi_thread_id u64 /******************************************************************************* * * Types specific to 64-bit targets * ******************************************************************************/ #if ACPI_MACHINE_WIDTH == 64 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s64 acpi_native_int; typedef u64 acpi_size; typedef u64 acpi_io_address; typedef u64 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT64_MAX #define ACPI_SIZE_MAX ACPI_UINT64_MAX #define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */ /* * In the case of the Itanium Processor Family (IPF), the hardware does not * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag * to indicate that special precautions must be taken to avoid alignment faults. * (IA64 or ia64 is currently used by existing compilers to indicate IPF.) * * Note: Em64_t and other X86-64 processors support misaligned transfers, * so there is no need to define this flag. */ #if defined (__IA64__) || defined (__ia64__) #define ACPI_MISALIGNMENT_NOT_SUPPORTED #endif /******************************************************************************* * * Types specific to 32-bit targets * ******************************************************************************/ #elif ACPI_MACHINE_WIDTH == 32 /*! [Begin] no source code translation (keep the typedefs as-is) */ typedef unsigned int UINT32; typedef int INT32; /*! [End] no source code translation !*/ typedef s32 acpi_native_int; typedef u32 acpi_size; typedef u32 acpi_io_address; typedef u32 acpi_physical_address; #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX #else /* ACPI_MACHINE_WIDTH must be either 64 or 32 */ #error unknown ACPI_MACHINE_WIDTH #endif /******************************************************************************* * * OS-dependent types * * If the defaults below are not appropriate for the host system, they can * be defined in the OS-specific header, and this will take precedence. * ******************************************************************************/ /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ #ifndef acpi_cpu_flags #define acpi_cpu_flags acpi_size #endif /* Object returned from acpi_os_create_cache */ #ifndef acpi_cache_t #ifdef ACPI_USE_LOCAL_CACHE #define acpi_cache_t struct acpi_memory_list #else #define acpi_cache_t void * #endif #endif /* * Synchronization objects - Mutexes, Semaphores, and spin_locks */ #if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE) /* * These macros are used if the host OS does not support a mutex object. * Map the OSL Mutex interfaces to binary semaphores. 
*/ #define acpi_mutex acpi_semaphore #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) #endif /* Configurable types for synchronization objects */ #ifndef acpi_spinlock #define acpi_spinlock void * #endif #ifndef acpi_semaphore #define acpi_semaphore void * #endif #ifndef acpi_mutex #define acpi_mutex void * #endif /******************************************************************************* * * Compiler-dependent types * * If the defaults below are not appropriate for the host compiler, they can * be defined in the compiler-specific header, and this will take precedence. * ******************************************************************************/ /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ #ifndef acpi_uintptr_t #define acpi_uintptr_t void * #endif /* * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because * some compilers can catch printf format string problems */ #ifndef ACPI_PRINTF_LIKE #define ACPI_PRINTF_LIKE(c) #endif /* * Some compilers complain about unused variables. Sometimes we don't want to * use all the variables (for example, _acpi_module_name). This allows us * to tell the compiler in a per-variable manner that a variable * is unused */ #ifndef ACPI_UNUSED_VAR #define ACPI_UNUSED_VAR #endif /* * All ACPICA functions that are available to the rest of the kernel are * tagged with this macro which can be defined as appropriate for the host. */ #ifndef ACPI_EXPORT_SYMBOL #define ACPI_EXPORT_SYMBOL(symbol) #endif /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) * *****************************************************************************/ /* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */ #define ACPI_MAX_GPE_BLOCKS 2 /* Default ACPI register widths */ #define ACPI_GPE_REGISTER_WIDTH 8 #define ACPI_PM1_REGISTER_WIDTH 16 #define ACPI_PM2_REGISTER_WIDTH 8 #define ACPI_PM_TIMER_WIDTH 32 /* Names within the namespace are 4 bytes long */ #define ACPI_NAME_SIZE 4 #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
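/*
 * For example, every namespace name is exactly ACPI_NAME_SIZE (4) characters,
 * e.g. "_SB_" or "PCI0", and a full pathname joins such segments with
 * ACPI_PATH_SEPARATOR, e.g. "\_SB_.PCI0.LPCB", which is why each segment
 * plus its separator occupies ACPI_PATH_SEGMENT_LENGTH (5) characters.
 */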
/* Sizes for ACPI table headers */ #define ACPI_OEM_ID_SIZE 6 #define ACPI_OEM_TABLE_ID_SIZE 8 /* ACPI/PNP hardware IDs */ #define PCI_ROOT_HID_STRING "PNP0A03" #define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08" /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /******************************************************************************* * * Independent types * ******************************************************************************/ /* Logical defines and NULL */ #ifdef FALSE #undef FALSE #endif #define FALSE (1 == 0) #ifdef TRUE #undef TRUE #endif #define TRUE (1 == 1) #ifndef NULL #define NULL (void *) 0 #endif /* * Miscellaneous types */ typedef u32 acpi_status; /* All ACPI Exceptions */ typedef u32 acpi_name; /* 4-byte ACPI name */ typedef char *acpi_string; /* Null terminated ASCII string */ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ /* Owner IDs are used to track namespace nodes for selective deletion */ typedef u8 acpi_owner_id; #define ACPI_OWNER_ID_MAX 0xFF #define ACPI_INTEGER_BIT_SIZE 64 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */ #if ACPI_MACHINE_WIDTH == 64 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */ #endif #define ACPI_MAX64_DECIMAL_DIGITS 20 #define ACPI_MAX32_DECIMAL_DIGITS 10 #define ACPI_MAX16_DECIMAL_DIGITS 5 #define ACPI_MAX8_DECIMAL_DIGITS 3 /* PM Timer ticks per second (HZ) */ #define PM_TIMER_FREQUENCY 3579545 /* * Constants with special meanings */ #define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR) #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ #define ACPI_DO_NOT_WAIT 0 /* * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are 32 bits. * In ACPI version 2 (2000) and later, integers are 64 bits. Note that this * pertains to the ACPI integer type only, not to other integers used in the * implementation of the ACPICA subsystem. * * 01/2010: This type is obsolete and has been removed from the entire ACPICA * code base. It remains here for compatibility with device drivers that use * the type. However, it will be removed in the future. 
*/ typedef u64 acpi_integer; #define ACPI_INTEGER_MAX ACPI_UINT64_MAX /******************************************************************************* * * Commonly used macros * ******************************************************************************/ /* Data manipulation */ #define ACPI_LOBYTE(integer) ((u8) (u16)(integer)) #define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8)) #define ACPI_LOWORD(integer) ((u16) (u32)(integer)) #define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16)) #define ACPI_LODWORD(integer64) ((u32) (u64)(integer64)) #define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) #define ACPI_MAX(a,b) (((a)>(b))?(a):(b)) /* Size calculation */ #define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) /* Pointer manipulation */ #define ACPI_CAST_PTR(t, p) ((t *) (acpi_uintptr_t) (p)) #define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p)) #define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b))) #define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))) /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i) #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL) #define ACPI_OFFSET(d, f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL) #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) #else #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) #endif /******************************************************************************* * * Miscellaneous constants * ******************************************************************************/ /* * Initialization sequence */ #define ACPI_FULL_INITIALIZATION 0x00 #define ACPI_NO_ADDRESS_SPACE_INIT 0x01 #define ACPI_NO_HARDWARE_INIT 0x02 #define ACPI_NO_EVENT_INIT 0x04 #define ACPI_NO_HANDLER_INIT 0x08 #define ACPI_NO_ACPI_ENABLE 0x10 #define ACPI_NO_DEVICE_INIT 0x20 #define ACPI_NO_OBJECT_INIT 0x40 /* * Initialization state */ #define ACPI_SUBSYSTEM_INITIALIZE 0x01 #define ACPI_INITIALIZED_OK 0x02 /* * Power state values */ #define ACPI_STATE_UNKNOWN (u8) 0xFF #define ACPI_STATE_S0 (u8) 0 #define ACPI_STATE_S1 (u8) 1 #define ACPI_STATE_S2 (u8) 2 #define ACPI_STATE_S3 (u8) 3 #define ACPI_STATE_S4 (u8) 4 #define ACPI_STATE_S5 (u8) 5 #define ACPI_S_STATES_MAX ACPI_STATE_S5 #define ACPI_S_STATE_COUNT 6 #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 #define ACPI_STATE_D3 (u8) 3 #define ACPI_STATE_D3_COLD (u8) 4 #define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD #define ACPI_D_STATE_COUNT 5 #define ACPI_STATE_C0 (u8) 0 #define ACPI_STATE_C1 (u8) 1 #define ACPI_STATE_C2 (u8) 2 #define ACPI_STATE_C3 (u8) 3 #define ACPI_C_STATES_MAX ACPI_STATE_C3 #define ACPI_C_STATE_COUNT 4 /* * Sleep type invalid value */ #define ACPI_SLEEP_TYPE_MAX 0x7 #define ACPI_SLEEP_TYPE_INVALID 0xFF /* * Standard notify values */ #define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 #define ACPI_NOTIFY_DEVICE_CHECK (u8) 0x01 #define ACPI_NOTIFY_DEVICE_WAKE (u8) 0x02 #define ACPI_NOTIFY_EJECT_REQUEST (u8) 0x03 #define ACPI_NOTIFY_DEVICE_CHECK_LIGHT (u8) 0x04 #define 
ACPI_NOTIFY_FREQUENCY_MISMATCH (u8) 0x05 #define ACPI_NOTIFY_BUS_MODE_MISMATCH (u8) 0x06 #define ACPI_NOTIFY_POWER_FAULT (u8) 0x07 #define ACPI_NOTIFY_CAPABILITIES_CHECK (u8) 0x08 #define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 #define ACPI_NOTIFY_RESERVED (u8) 0x0A #define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B #define ACPI_NOTIFY_MAX 0x0B /* * Types associated with ACPI names and objects. The first group of * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition * of the ACPI object_type() operator (See the ACPI Spec). Therefore, * only add to the first group if the spec changes. * * NOTE: Types must be kept in sync with the global acpi_ns_properties * and acpi_ns_type_names arrays. */ typedef u32 acpi_object_type; #define ACPI_TYPE_ANY 0x00 #define ACPI_TYPE_INTEGER 0x01 /* Byte/Word/Dword/Zero/One/Ones */ #define ACPI_TYPE_STRING 0x02 #define ACPI_TYPE_BUFFER 0x03 #define ACPI_TYPE_PACKAGE 0x04 /* byte_const, multiple data_term/Constant/super_name */ #define ACPI_TYPE_FIELD_UNIT 0x05 #define ACPI_TYPE_DEVICE 0x06 /* Name, multiple Node */ #define ACPI_TYPE_EVENT 0x07 #define ACPI_TYPE_METHOD 0x08 /* Name, byte_const, multiple Code */ #define ACPI_TYPE_MUTEX 0x09 #define ACPI_TYPE_REGION 0x0A #define ACPI_TYPE_POWER 0x0B /* Name,byte_const,word_const,multi Node */ #define ACPI_TYPE_PROCESSOR 0x0C /* Name,byte_const,Dword_const,byte_const,multi nm_o */ #define ACPI_TYPE_THERMAL 0x0D /* Name, multiple Node */ #define ACPI_TYPE_BUFFER_FIELD 0x0E #define ACPI_TYPE_DDB_HANDLE 0x0F #define ACPI_TYPE_DEBUG_OBJECT 0x10 #define ACPI_TYPE_EXTERNAL_MAX 0x10 /* * These are object types that do not map directly to the ACPI * object_type() operator. They are used for various internal purposes only. * If new predefined ACPI_TYPEs are added (via the ACPI specification), these * internal types must move upwards. (There is code that depends on these * values being contiguous with the external types above.) */ #define ACPI_TYPE_LOCAL_REGION_FIELD 0x11 #define ACPI_TYPE_LOCAL_BANK_FIELD 0x12 #define ACPI_TYPE_LOCAL_INDEX_FIELD 0x13 #define ACPI_TYPE_LOCAL_REFERENCE 0x14 /* Arg#, Local#, Name, Debug, ref_of, Index */ #define ACPI_TYPE_LOCAL_ALIAS 0x15 #define ACPI_TYPE_LOCAL_METHOD_ALIAS 0x16 #define ACPI_TYPE_LOCAL_NOTIFY 0x17 #define ACPI_TYPE_LOCAL_ADDRESS_HANDLER 0x18 #define ACPI_TYPE_LOCAL_RESOURCE 0x19 #define ACPI_TYPE_LOCAL_RESOURCE_FIELD 0x1A #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */ #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */ /* * These are special object types that never appear in * a Namespace node, only in a union acpi_operand_object */ #define ACPI_TYPE_LOCAL_EXTRA 0x1C #define ACPI_TYPE_LOCAL_DATA 0x1D #define ACPI_TYPE_LOCAL_MAX 0x1D /* All types above here are invalid */ #define ACPI_TYPE_INVALID 0x1E #define ACPI_TYPE_NOT_FOUND 0xFF #define ACPI_NUM_NS_TYPES (ACPI_TYPE_INVALID + 1) /* * All I/O */ #define ACPI_READ 0 #define ACPI_WRITE 1 #define ACPI_IO_MASK 1 /* * Event Types: Fixed & General Purpose */ typedef u32 acpi_event_type; /* * Fixed events */ #define ACPI_EVENT_PMTIMER 0 #define ACPI_EVENT_GLOBAL 1 #define ACPI_EVENT_POWER_BUTTON 2 #define ACPI_EVENT_SLEEP_BUTTON 3 #define ACPI_EVENT_RTC 4 #define ACPI_EVENT_MAX 4 #define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1 /* * Event Status - Per event * ------------- * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). 
* +-------------+-+-+-+ * | Bits 31:3 |2|1|0| * +-------------+-+-+-+ * | | | | * | | | +- Enabled? * | | +--- Enabled for wake? * | +----- Set? * +----------- */ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_DISABLED (acpi_event_status) 0x00 #define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 #define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 #define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 /* * General Purpose Events (GPE) */ #define ACPI_GPE_INVALID 0xFF #define ACPI_GPE_MAX 0xFF #define ACPI_NUM_GPE 256 /* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */ #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 #define ACPI_GPE_CONDITIONAL_ENABLE 2 /* * GPE info flags - Per GPE * +-------+-+-+---+ * | 7:4 |3|2|1:0| * +-------+-+-+---+ * | | | | * | | | +-- Type of dispatch:to method, handler, notify, or none * | | +----- Interrupt type: edge or level triggered * | +------- Is a Wake GPE * +------------ */ #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 #define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 #define ACPI_GPE_DISPATCH_MASK (u8) 0x03 #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 #define ACPI_GPE_CAN_WAKE (u8) 0x08 /* * Flags for GPE and Lock interfaces */ #define ACPI_NOT_ISR 0x1 #define ACPI_ISR 0x0 /* Notify types */ #define ACPI_SYSTEM_NOTIFY 0x1 #define ACPI_DEVICE_NOTIFY 0x2 #define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) #define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 #define ACPI_MAX_SYS_NOTIFY 0x7f /* Address Space (Operation Region) Types */ typedef u8 acpi_adr_space_type; #define ACPI_ADR_SPACE_SYSTEM_MEMORY (acpi_adr_space_type) 0 #define ACPI_ADR_SPACE_SYSTEM_IO (acpi_adr_space_type) 1 #define ACPI_ADR_SPACE_PCI_CONFIG (acpi_adr_space_type) 2 #define ACPI_ADR_SPACE_EC (acpi_adr_space_type) 3 #define ACPI_ADR_SPACE_SMBUS (acpi_adr_space_type) 4 #define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 #define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 #define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 #define ACPI_NUM_PREDEFINED_REGIONS 8 /* * Special Address Spaces * * Note: A Data Table region is a special type of operation region * that has its own AML opcode. However, internally, the AML * interpreter simply creates an operation region with an an address * space type of ACPI_ADR_SPACE_DATA_TABLE. */ #define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */ #define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F /* Values for _REG connection code */ #define ACPI_REG_DISCONNECT 0 #define ACPI_REG_CONNECT 1 /* * bit_register IDs * * These values are intended to be used by the hardware interfaces * and are mapped to individual bitfields defined within the ACPI * registers. See the acpi_gbl_bit_register_info global table in utglobal.c * for this mapping. 
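 *
 * Sketch of typical use (acpi_read_bit_register() is declared elsewhere in
 * ACPICA, in acpixf.h):
 *
 *	acpi_status status;
 *	u32 wake_status;
 *
 *	status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &wake_status);
 *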
*/ /* PM1 Status register */ #define ACPI_BITREG_TIMER_STATUS 0x00 #define ACPI_BITREG_BUS_MASTER_STATUS 0x01 #define ACPI_BITREG_GLOBAL_LOCK_STATUS 0x02 #define ACPI_BITREG_POWER_BUTTON_STATUS 0x03 #define ACPI_BITREG_SLEEP_BUTTON_STATUS 0x04 #define ACPI_BITREG_RT_CLOCK_STATUS 0x05 #define ACPI_BITREG_WAKE_STATUS 0x06 #define ACPI_BITREG_PCIEXP_WAKE_STATUS 0x07 /* PM1 Enable register */ #define ACPI_BITREG_TIMER_ENABLE 0x08 #define ACPI_BITREG_GLOBAL_LOCK_ENABLE 0x09 #define ACPI_BITREG_POWER_BUTTON_ENABLE 0x0A #define ACPI_BITREG_SLEEP_BUTTON_ENABLE 0x0B #define ACPI_BITREG_RT_CLOCK_ENABLE 0x0C #define ACPI_BITREG_PCIEXP_WAKE_DISABLE 0x0D /* PM1 Control register */ #define ACPI_BITREG_SCI_ENABLE 0x0E #define ACPI_BITREG_BUS_MASTER_RLD 0x0F #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 #define ACPI_BITREG_SLEEP_TYPE 0x11 #define ACPI_BITREG_SLEEP_ENABLE 0x12 /* PM2 Control register */ #define ACPI_BITREG_ARB_DISABLE 0x13 #define ACPI_BITREG_MAX 0x13 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 /* Status register values. A 1 clears a status bit. 0 = no effect */ #define ACPI_CLEAR_STATUS 1 /* Enable and Control register values */ #define ACPI_ENABLE_EVENT 1 #define ACPI_DISABLE_EVENT 0 /* * External ACPI object definition */ /* * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element * or an unresolved named reference. */ union acpi_object { acpi_object_type type; /* See definition of acpi_ns_type for values */ struct { acpi_object_type type; /* ACPI_TYPE_INTEGER */ u64 value; /* The actual number */ } integer; struct { acpi_object_type type; /* ACPI_TYPE_STRING */ u32 length; /* # of bytes in string, excluding trailing null */ char *pointer; /* points to the string value */ } string; struct { acpi_object_type type; /* ACPI_TYPE_BUFFER */ u32 length; /* # of bytes in buffer */ u8 *pointer; /* points to the buffer */ } buffer; struct { acpi_object_type type; /* ACPI_TYPE_PACKAGE */ u32 count; /* # of elements in package */ union acpi_object *elements; /* Pointer to an array of ACPI_OBJECTs */ } package; struct { acpi_object_type type; /* ACPI_TYPE_LOCAL_REFERENCE */ acpi_object_type actual_type; /* Type associated with the Handle */ acpi_handle handle; /* object reference */ } reference; struct { acpi_object_type type; /* ACPI_TYPE_PROCESSOR */ u32 proc_id; acpi_io_address pblk_address; u32 pblk_length; } processor; struct { acpi_object_type type; /* ACPI_TYPE_POWER */ u32 system_level; u32 resource_order; } power_resource; }; /* * List of objects, used as a parameter list for control method evaluation */ struct acpi_object_list { u32 count; union acpi_object *pointer; }; /* * Miscellaneous common Data Structures used by the interfaces */ #define ACPI_NO_BUFFER 0 #define ACPI_ALLOCATE_BUFFER (acpi_size) (-1) #define ACPI_ALLOCATE_LOCAL_BUFFER (acpi_size) (-2) struct acpi_buffer { acpi_size length; /* Length in bytes of the buffer */ void *pointer; /* pointer to buffer */ }; /* * name_type for acpi_get_name */ #define ACPI_FULL_PATHNAME 0 #define ACPI_SINGLE_NAME 1 #define ACPI_NAME_TYPE_MAX 1 /* * Predefined Namespace items */ struct acpi_predefined_names { char *name; u8 type; char *val; }; /* * Structure and flags for acpi_get_system_info */ #define ACPI_SYS_MODE_UNKNOWN 0x0000 #define ACPI_SYS_MODE_ACPI 0x0001 #define ACPI_SYS_MODE_LEGACY 0x0002 #define ACPI_SYS_MODES_MASK 0x0003 /* * System info returned by acpi_get_system_info() */ struct acpi_system_info { u32 acpi_ca_version; u32 flags; u32 timer_resolution; u32 reserved1; u32 reserved2; u32 debug_level; u32 
debug_layer; }; /* Table Event Types */ #define ACPI_TABLE_EVENT_LOAD 0x0 #define ACPI_TABLE_EVENT_UNLOAD 0x1 #define ACPI_NUM_TABLE_EVENTS 2 /* * Types specific to the OS service interfaces */ typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); /* * Various handlers and callback procedures */ typedef void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, acpi_handle device, u32 event_number, void *context); #define ACPI_EVENT_TYPE_GPE 0 #define ACPI_EVENT_TYPE_FIXED 1 typedef u32(*acpi_event_handler) (void *context); typedef u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); typedef void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); typedef void (*acpi_object_handler) (acpi_handle object, void *data); typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function); #define ACPI_INIT_DEVICE_INI 1 typedef acpi_status(*acpi_exception_handler) (acpi_status aml_status, acpi_name name, u16 opcode, u32 aml_offset, void *context); /* Table Event handler (Load, load_table, etc.) and types */ typedef acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context); /* Address Spaces (For Operation Regions) */ typedef acpi_status(*acpi_adr_space_handler) (u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context); #define ACPI_DEFAULT_HANDLER NULL typedef acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle, u32 function, void *handler_context, void **region_context); #define ACPI_REGION_ACTIVATE 0 #define ACPI_REGION_DEACTIVATE 1 typedef acpi_status(*acpi_walk_callback) (acpi_handle object, u32 nesting_level, void *context, void **return_value); typedef u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); /* Interrupt handler return values */ #define ACPI_INTERRUPT_NOT_HANDLED 0x00 #define ACPI_INTERRUPT_HANDLED 0x01 /* GPE handler return values */ #define ACPI_REENABLE_GPE 0x80 /* Length of 32-bit EISAID values when converted back to a string */ #define ACPI_EISAID_STRING_SIZE 8 /* Includes null terminator */ /* Length of UUID (string) values */ #define ACPI_UUID_LENGTH 16 /* Structures used for device/processor HID, UID, CID */ struct acpica_device_id { u32 length; /* Length of string + null */ char *string; }; struct acpica_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ struct acpica_device_id ids[1]; /* ID array */ }; /* * Structure returned from acpi_get_object_info. 
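 *
 * Illustrative call sequence for this structure (a hedged sketch added here,
 * not part of the original source; it assumes the two-argument
 * acpi_get_object_info() used by ACPICA of this vintage, with the returned
 * buffer released by the caller, e.g. via ACPI_FREE()):
 *
 *   struct acpi_device_info *info;
 *   if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
 *       if (info->valid & ACPI_VALID_HID)
 *           printk(KERN_DEBUG "HID: %s\n", info->hardware_id.string);
 *       ACPI_FREE(info);
 *   }
 *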
* Optimized for both 32- and 64-bit builds */ struct acpi_device_info { u32 info_size; /* Size of info, including ID strings */ u32 name; /* ACPI object Name */ acpi_object_type type; /* ACPI object Type */ u8 param_count; /* If a method, required parameter count */ u8 valid; /* Indicates which optional fields are valid */ u8 flags; /* Miscellaneous info */ u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */ u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ u32 current_status; /* _STA value */ u64 address; /* _ADR value */ struct acpica_device_id hardware_id; /* _HID value */ struct acpica_device_id unique_id; /* _UID value */ struct acpica_device_id_list compatible_id_list; /* _CID list */ }; /* Values for Flags field above (acpi_get_object_info) */ #define ACPI_PCI_ROOT_BRIDGE 0x01 /* Flags for Valid field above (acpi_get_object_info) */ #define ACPI_VALID_STA 0x01 #define ACPI_VALID_ADR 0x02 #define ACPI_VALID_HID 0x04 #define ACPI_VALID_UID 0x08 #define ACPI_VALID_CID 0x10 #define ACPI_VALID_SXDS 0x20 #define ACPI_VALID_SXWS 0x40 /* Flags for _STA method */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 #define ACPI_STA_DEVICE_UI 0x04 #define ACPI_STA_DEVICE_FUNCTIONING 0x08 #define ACPI_STA_DEVICE_OK 0x08 /* Synonym */ #define ACPI_STA_BATTERY_PRESENT 0x10 /* Context structs for address space handlers */ struct acpi_pci_id { u16 segment; u16 bus; u16 device; u16 function; }; struct acpi_mem_space_context { u32 length; acpi_physical_address address; acpi_physical_address mapped_physical_address; u8 *mapped_logical_address; acpi_size mapped_length; }; /* * struct acpi_memory_list is used only if the ACPICA local cache is enabled */ struct acpi_memory_list { char *list_name; void *list_head; u16 object_size; u16 max_depth; u16 current_depth; u16 link_offset; #ifdef ACPI_DBG_TRACK_ALLOCATIONS /* Statistics for debug memory tracking only */ u32 total_allocated; u32 total_freed; u32 max_occupied; u32 total_size; u32 current_total_size; u32 requests; u32 hits; #endif }; #endif /* __ACTYPES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/netfront.c000066400000000000000000001750021314037446600302750ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_bus.h" #include "actypes.h" struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif #define GRANT_INVALID_REF 0 struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) #define IPRINTK(fmt, args...) pr_info("netfront: " fmt, ##args) #define WPRINTK(fmt, args...) 
pr_warning("netfront: " fmt, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of rtl8139 */ static int rtl8139_acpi_bus_trim(acpi_handle handle) { struct acpi_device *device; int retval; retval = acpi_bus_get_device(handle, &device); if (retval) { printk(KERN_INFO "acpi_device not found\n"); return retval; } retval = acpi_bus_trim(device, 1); if (retval) printk(KERN_INFO "cannot remove from acpi list\n"); return retval; } static void remove_rtl8139(void) { struct pci_dev *pdev = NULL; acpi_handle phandle = NULL; pdev = pci_get_device(0x10ec, 0x8139, pdev); if (pdev) { phandle = DEVICE_ACPI_HANDLE(&pdev->dev); printk(KERN_INFO "remove_rtl8139 : rtl8139 of subsystem(0x%2x,0x%2x) removed.\n", pdev->subsystem_vendor, pdev->subsystem_device); pci_disable_device(pdev); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pdev); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pdev); pci_remove_bus_device(pdev); #else pci_stop_and_remove_bus_device(pdev); #endif /* end check kernel version */ pci_dev_put(pdev); } if (phandle) { rtl8139_acpi_bus_trim(phandle); } return; } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. */ static void __devinit netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) printk(KERN_WARNING "Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
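 *
 * In outline (summary added for clarity), the probe path below:
 *   1. remove_rtl8139()      - unplug the emulated NIC so it does not
 *                              coexist with the PV interface
 *   2. create_netdev()       - allocate the net_device and netfront_info
 *   3. xen_net_read_mac()    - read the "mac" key from xenstore
 *   4. register_netdev() and xennet_sysfs_addif()
 * The shared rings themselves are created later, from talk_to_backend(),
 * once backend_changed() sees the backend enter InitWait.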
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove rtl8139 when xen nic devices appear */ remove_rtl8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
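 *
 * The transaction below then advertises the frontend parameters under the
 * device's xenstore node.  Illustrative layout (the node name and the
 * values are examples, not taken from the original source):
 *
 *   device/vif/0/tx-ring-ref             = "768"
 *   device/vif/0/rx-ring-ref             = "769"
 *   device/vif/0/event-channel           = "21"
 *   device/vif/0/request-rx-copy         = "1"
 *   device/vif/0/feature-rx-notify       = "1"
 *   device/vif/0/feature-no-csum-offload = "0"
 *   device/vif/0/feature-sg              = "1"
 *   device/vif/0/feature-gso-tcpv4       = "1"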
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. 
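 *
 * Typical handshake handled below (summary added for clarity):
 *
 *   backend -> InitWait   : call network_connect() to set up rings, grants
 *                           and the event channel, then switch the frontend
 *                           to Connected
 *   backend -> Connected  : send a gratuitous ARP request and reply and call
 *                           netif_notify_peers() so switches relearn the MAC,
 *                           e.g. after a migration
 *   backend -> Closing    : complete the shutdown via xenbus_frontend_closed()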
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { int count = 0; struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netif_notify_peers(netdev); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } /** * Description: * When NIC device has been actived successfully, operstate which is * member of netdev structure should be set to UP. */ np->netdev->operstate = (IF_OPER_DOWN != np->netdev->operstate) ? IF_OPER_UP : IF_OPER_DOWN; spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
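 *
 * Worked example of the check below (illustrative numbers): with
 * rx_target == 64, if fewer than 16 posted-but-unanswered requests
 * (rx_target / 4) remain, the backend nearly ran out of buffers, so
 * rx_target is doubled to 128 and clamped to rx_max_target.  The matching
 * linear decrease happens in netif_poll() when the ring stays mostly full.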
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if ( nr_flips != 0 ) { /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); reservation.nr_extents = nr_flips; reservation.extent_order = 0; reservation.address_bits = 0; reservation.domid = DOMID_SELF; if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ np->rx_mcl[i].op = __HYPERVISOR_memory_op; np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; np->rx_mcl[i].args[1] = (unsigned long)&reservation; /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
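 *
 * The private producer index is published to the shared ring below, and the
 * event channel is only kicked when RING_PUSH_REQUESTS_AND_CHECK_NOTIFY()
 * reports that the backend is actually waiting for more requests, which
 * avoids a notification per refill in the common case (note added for
 * clarity).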
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { pr_alert("xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? 
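	 * (Note added for clarity: a CHECKSUM_PARTIAL skb has not had its
	 * checksum filled in yet, so the backend is told the field is blank
	 * via csum_blank while the payload itself is marked trustworthy;
	 * CHECKSUM_UNNECESSARY only asserts the latter.)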
*/ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) WPRINTK("Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) WPRINTK("Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; 
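		/*
		 * Note added for clarity: this loop walks every slot that
		 * belongs to the current packet (slots are chained by the
		 * XEN_NETRXF_more_data flag).  Usable fragments are queued on
		 * 'list'; slots with a bad status or an invalid grant are put
		 * back on the ring via xennet_move_rx_slot().  'max' bounds
		 * the walk to MAX_SKB_FRAGS, plus one when the head fits in
		 * the RX_COPY_THRESHOLD area and will be copied out.
		 */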
if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) WPRINTK("rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) WPRINTK("Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) WPRINTK("Unfulfilled rx req " "(id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) WPRINTK("Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) WPRINTK("Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) WPRINTK("GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) WPRINTK("Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
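 * SKB_GSO_DODGY marks the GSO metadata as coming from an untrusted source
 * (the backend here), so the stack re-validates the headers before
 * segmenting, and gso_segs is left at zero below so that it is recomputed
 * (note added for clarity).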
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) WPRINTK("GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; struct multicall_entry *mcl; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). 
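 *
 * Worked example of the adjustment below (illustrative numbers, with
 * RX_COPY_THRESHOLD == 256): a 1500-byte response gives len == 256 and
 * data_len == 1244, so truesize grows by 1244 - (256 - 256) = 1244 bytes.
 * A 66-byte header-only response gives len == 66 and data_len == 0, so
 * truesize shrinks by the 190 unused bytes of the main data area.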
*/ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { mcl = np->rx_mcl + pages_flipped; mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = pages_flipped; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { kfree_skb(skb); continue; } /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); 
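		/*
		 * Note added for clarity: a zero mfn means the backend never
		 * transferred a page into this slot, so the original page is
		 * simply handed back to the balloon driver below.  A non-zero
		 * mfn is a foreign page this domain now owns; it must be
		 * re-mapped and entered in the phys-to-machine table before
		 * the skb can be freed through the normal path.
		 */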
gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)np->rx_mmu; mcl->args[1] = mmu - np->rx_mmu; mcl->args[2] = 0; mcl->args[3] = DOMID_SELF; mcl++; rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &dev->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } /** * Description: * Get device-specific settings * Version: */ static int netfront_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netfront_info *adapter = netdev_priv(dev); struct netfront_hw *hw = &adapter->hw; u32 link_speed = 0; bool link_up = 1; ecmd->supported = SUPPORTED_1000baseT_Full; ecmd->transceiver = XCVR_EXTERNAL; ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; if (link_up) { ecmd->speed = (link_speed == NETFRONT_LINK_SPEED_10GB_FULL) ? 
SPEED_10000 : SPEED_1000; ecmd->duplex = DUPLEX_FULL; } else { ecmd->speed = -1; ecmd->duplex = -1; } return 0; } /** * Description: * Report whether Wake-on-Lan is enabled */ static void netfront_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netfront_info *adapter = netdev_priv(dev); wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & NETFRONT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & NETFRONT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & NETFRONT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & NETFRONT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; return; } /** * Description: * Report driver message level */ static u32 netfront_get_msglevel(struct net_device *dev) { struct netfront_info *adapter = netdev_priv(dev); adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; return adapter->msg_enable; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strcpy(info->version, "1.0"); strcpy(info->bus_info, dev_name(dev->dev.parent)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. 
Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { /** * Description: * Added some information for ethtool. */ .get_settings = netfront_get_settings, .get_wol = netfront_get_wol, .get_msglevel = netfront_get_msglevel, .get_drvinfo = netfront_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, 
NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static u32 xennet_fix_features(struct net_device *dev, u32 features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } return features; } static int xennet_set_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static struct xenbus_driver netfront_driver = { .name = "vif", .ids = netfront_ids, .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, }; static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { WPRINTK("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = 1; /* Default is to copy. */ #endif netif_init_accel(); IPRINTK("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.13/netfront.h000066400000000000000000000245011314037446600302770ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. 
*/ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; struct pci_dev *pdev; struct netfront_hw *hw; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; u16 msg_enable; u32 wol; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; #define NETFRONT_WUFC_MAG 0x00000002 #define NETFRONT_WUFC_EX 0x00000004 #define NETFRONT_WUFC_MC 0x00000008 #define NETFRONT_WUFC_BC 0x00000010 typedef u32 netfront_autoneg_advertised; typedef u32 netfront_link_speed; #define NETFRONT_LINK_SPEED_UNKNOWN 0 #define NETFRONT_LINK_SPEED_100_FULL 0x0008 #define NETFRONT_LINK_SPEED_1GB_FULL 0x0020 #define NETFRONT_LINK_SPEED_10GB_FULL 0x0080 #define NETFRONT_LINK_SPEED_82598_AUTONEG (NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_LINK_SPEED_82599_AUTONEG (NETFRONT_LINK_SPEED_100_FULL | \ NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_PCIE_DEV_CTRL_2 0xC8 #define PCIE_COMPL_TO_VALUE 0x05 struct netfront_mac_operations { s32 (*setup_link)(struct netfront_hw *, netfront_link_speed, bool, bool); s32 (*check_link)(struct netfront_hw *, netfront_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct netfront_hw *, netfront_link_speed *, bool *); }; struct netfront_mac_info { struct netfront_mac_operations ops; bool autotry_restart; }; enum netfront_media_type { netfront_media_type_unknown = 0, netfront_media_type_fiber, netfront_media_type_copper, netfront_media_type_backplane, netfront_media_type_cx4, netfront_media_type_virtual }; struct netfront_phy_info { enum netfront_media_type media_type; netfront_autoneg_advertised autoneg_advertised; bool multispeed_fiber; }; struct netfront_hw { u16 device_id; struct netfront_mac_info mac; struct netfront_phy_info phy; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. 
Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.76/000077500000000000000000000000001314037446600262765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.76/accel.c000066400000000000000000000531521314037446600275170ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) 
\ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.76/netfront.c000066400000000000000000001736621314037446600303200ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static int MODPARM_rx_copy = 0; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static int MODPARM_rx_flip = 0; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else static const int MODPARM_rx_copy = 1; static const int MODPARM_rx_flip = 0; #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define HAVE_CSUM_OFFLOAD 1 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define HAVE_CSUM_OFFLOAD 0 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define HAVE_CSUM_OFFLOAD 0 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) 
\ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static void send_fake_arp(struct net_device *, int arpType); static irqreturn_t netif_int(int irq, void *dev_id); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include static void remove_nic8139(void) { struct pci_dev *pci_dev_nic8139 = NULL; pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* end check kernel version */ } return; } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. */ static void __devinit netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) pr_warn("Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
*/ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove 8139 nic when xen nic devices appear */ remove_nic8139(); netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int __devexit netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload", "%d", !HAVE_CSUM_OFFLOAD); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", HAVE_TSO); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. 
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netif_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); memset(&np->stats, 0, sizeof(np->stats)); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } np->netdev->operstate = (IF_OPER_DOWN != np->netdev->operstate) ? IF_OPER_UP : IF_OPER_DOWN; spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
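 * The target grows exponentially (doubled, capped at rx_max_target) when
 * the ring nearly drained; the matching linear decrease is applied in
 * netif_poll() when responses arrive in small batches.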
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if (nr_flips) { struct xen_memory_reservation reservation = { .nr_extents = nr_flips, .domid = DOMID_SELF, }; /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ MULTI_memory_op(np->rx_mcl + i, XENMEM_decrease_reservation, &reservation); /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. 
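 * Either the decrease_reservation hypercall/multicall or the explicit
 * wmb() above orders the request writes before req_prod is advanced below,
 * so the backend never sees a producer index ahead of initialised requests.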
*/ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn, flags; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { pr_alert("xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netfront_carrier_ok(np) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? 
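 * CHECKSUM_PARTIAL: the stack has not computed the checksum yet, so ask
 * the backend to complete it (csum_blank) and mark the data as validated.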
*/ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_shinfo(skb)->gso_size) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) netdev_warn(np->netdev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) netdev_warn(np->netdev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = 
np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) netdev_warn(np->netdev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) netdev_warn(np->netdev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) netdev_warn(np->netdev, "Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) netdev_warn(np->netdev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) netdev_warn(np->netdev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) netdev_warn(skb->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) netdev_warn(skb->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
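 * SKB_GSO_DODGY flags the skb as coming from an untrusted source, so the
 * stack re-validates the GSO metadata before segmenting.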
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) netdev_warn(skb->dev, "GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize must approximates the size of true data plus * any supervisor overheads. Adding hypervisor overheads * has been shown to significantly reduce achievable * bandwidth with the default receive buffer size. It is * therefore not wise to account for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to * RX_COPY_THRESHOLD + the supervisor overheads. Here, we * add the size of the data pulled in xennet_fill_frags(). * * We also adjust for any unused space in the main data * area by subtracting (RX_COPY_THRESHOLD - len). This is * especially important with drivers which split incoming * packets into header and data, using only 66 bytes of * the main data area (see the e1000 driver for example.) * On such systems, without this last adjustement, our * achievable receive throughout using the standard receive * buffer size was cut by 25%(!!!). 
*/ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { MULTI_mmu_update(np->rx_mcl + pages_flipped, np->rx_mmu, pages_flipped, 0, DOMID_SELF); err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { kfree_skb(skb); continue; } /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; 
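/*
 * Return the ring id to the freelist.  If the transfer never completed
 * (mfn == 0), give the page back to the balloon driver and free the skb;
 * otherwise remap the returned machine frame into the page and queue the
 * skb on free_list for release below.
 */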
add_id_to_freelist(np->rx_skbs, id); if (0 == mfn) { struct page *page = skb_shinfo(skb)->frags[0].page; balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, 0, DOMID_SELF); rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl + 1 - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_get_stats(np, dev); return &dev->stats; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_set_sg(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } else if (dev->mtu > ETH_DATA_LEN) dev->mtu = ETH_DATA_LEN; return ethtool_op_set_sg(dev, data); } static int xennet_set_tso(struct net_device *dev, u32 data) { if (data) { struct netfront_info *np = netdev_priv(dev); int val; if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) return -ENOSYS; } return ethtool_op_set_tso(dev, data); } static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } /** * Description: * Get device-specific settings * Version: */ static int netfront_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netfront_info *adapter = netdev_priv(dev); u32 link_speed = 0; bool link_up = 1; ecmd->supported = SUPPORTED_1000baseT_Full; ecmd->transceiver = XCVR_EXTERNAL; ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; if (link_up) { ecmd->speed = (link_speed == NETFRONT_LINK_SPEED_10GB_FULL) ? 
SPEED_10000 : SPEED_1000; ecmd->duplex = DUPLEX_FULL; } else { ecmd->speed = -1; ecmd->duplex = -1; } return 0; } /** * Description: * Report whether Wake-on-Lan is enabled */ static void netfront_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netfront_info *adapter = netdev_priv(dev); wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & NETFRONT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & NETFRONT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & NETFRONT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & NETFRONT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; return; } /** * Description: * Report driver message level */ static u32 netfront_get_msglevel(struct net_device *dev) { struct netfront_info *adapter = netdev_priv(dev); adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; return adapter->msg_enable; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strcpy(info->version, "1.0"); strlcpy(info->bus_info, dev_name(dev->dev.parent), ARRAY_SIZE(info->bus_info)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, page_to_pfn(skb_shinfo(skb)->frags->page)); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. 
Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { /* * Description: * Added some information for ethtool. */ .get_settings = netfront_get_settings, .get_wol = netfront_get_wol, .get_msglevel = netfront_get_msglevel, .get_drvinfo = netfront_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, #if HAVE_TSO .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, #endif .get_drvinfo = netfront_get_drvinfo, .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), 
__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static u32 xennet_fix_features(struct net_device *dev, u32 features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } return features; } static int xennet_set_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { netif_int(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_multicast_list = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats = network_get_stats, }; static struct net_device * __devinit create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { pr_warning("%s: alloc_etherdev failed\n", __FUNCTION__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
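 * A free slot in tx_skbs[] holds the small integer index of the next free
 * slot (i + 1); an in-use slot holds the skb pointer.  The two are told
 * apart by comparing against PAGE_OFFSET (see netif_release_tx_bufs() and
 * the recovery notes in network_connect()).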
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } #ifdef CONFIG_INET /* * We use this notifier to send out a fake ARP reply to reset switches and * router ARP caches when an IP interface is brought up on a VIF. */ static int inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) { int count = 0; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; /* UP event and is it one of our devices? */ if (event == NETDEV_UP && dev->netdev_ops->ndo_open == network_open) { for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REQUEST); } for (count=0; count<3; count++) { (void)send_fake_arp(dev, ARPOP_REPLY); } } return NOTIFY_DONE; } static struct notifier_block notifier_inetdev = { .notifier_call = inetdev_notify, .next = NULL, .priority = 0 }; #endif static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
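 * Taking both rx_lock and tx_lock ensures the datapath sees the carrier
 * drop atomically; any bound event-channel irq is then unbound and the
 * shared rings released.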
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = __devexit_p(netfront_remove), .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, ); static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { pr_warning("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = 1; /* Default is to copy. */ #endif netif_init_accel(); pr_info("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/3.0.76/netfront.h000066400000000000000000000245531314037446600303170ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* * Function pointer table for hooks into a network acceleration * plugin. 
These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct net_device_stats stats; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. 
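 * (tx_skbs therefore has NET_TX_RING_SIZE + 1 entries: slot 0 is the
 * freelist head and slots 1..NET_TX_RING_SIZE correspond to ring ids.)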
*/ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; struct pci_dev *pdev; struct netfront_hw *hw; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; u16 msg_enable; u32 wol; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; #define NETFRONT_WUFC_MAG 0x00000002 #define NETFRONT_WUFC_EX 0x00000004 #define NETFRONT_WUFC_MC 0x00000008 #define NETFRONT_WUFC_BC 0x00000010 typedef u32 netfront_autoneg_advertised; typedef u32 netfront_link_speed; #define NETFRONT_LINK_SPEED_UNKNOWN 0 #define NETFRONT_LINK_SPEED_100_FULL 0x0008 #define NETFRONT_LINK_SPEED_1GB_FULL 0x0020 #define NETFRONT_LINK_SPEED_10GB_FULL 0x0080 #define NETFRONT_LINK_SPEED_82598_AUTONEG (NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_LINK_SPEED_82599_AUTONEG (NETFRONT_LINK_SPEED_100_FULL | \ NETFRONT_LINK_SPEED_1GB_FULL | \ NETFRONT_LINK_SPEED_10GB_FULL) #define NETFRONT_PCIE_DEV_CTRL_2 0xC8 #define PCIE_COMPL_TO_VALUE 0x05 struct netfront_mac_operations { s32 (*setup_link)(struct netfront_hw *, netfront_link_speed, bool, bool); s32 (*check_link)(struct netfront_hw *, netfront_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct netfront_hw *, netfront_link_speed *, bool *); }; struct netfront_mac_info { struct netfront_mac_operations ops; bool autotry_restart; }; enum netfront_media_type { netfront_media_type_unknown = 0, netfront_media_type_fiber, netfront_media_type_copper, netfront_media_type_backplane, netfront_media_type_cx4, netfront_media_type_virtual }; struct netfront_phy_info { enum netfront_media_type media_type; netfront_autoneg_advertised autoneg_advertised; bool multispeed_fiber; }; struct netfront_hw { u16 device_id; struct netfront_mac_info mac; struct netfront_phy_info phy; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. 
Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/Kbuild000066400000000000000000000001411314037446600266740ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-2.6.32to3.0.x/xen-vnif/Makefile000066400000000000000000000000661314037446600272050ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/000077500000000000000000000000001314037446600242015ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/Makefile000066400000000000000000000033211314037446600256400ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : ###### ###### Modify : Jinjian 00196667 ###### ###### Description : To optimize the PV_ON_HVM drivers's Makefile ###### ###### History : ###### ###### 2012-02-24 : Create the file ###### ############################################################################### M=$(shell pwd) -include $(M)/config.mk COMPILEARGS = $(CROSSCOMPILE) obj-m += xen-platform-pci/ ifneq ("3.16.6", "$(OSVERSION)") obj-m += xen-balloon/ obj-m += xen-scsi/ endif obj-m += xen-vbd/ obj-m += xen-vnif/ ifeq ("SUSE12SP1", $(OSTYPE)) OSVERSION=3.12.49 endif ifeq ("SUSE12SP0", $(OSTYPE)) OSVERSION=3.12.28 endif all:lnfile make -C $(KERNDIR) M=$(M) modules $(COMPILEARGS) modules_install: make -C $(KERNDIR) M=$(M) modules_install $(COMPILEARGS) lnfile: @set -e; \ for dir in $(obj-m); \ do \ if [ "xen-balloon/" = "$$dir" ] || [ "xen-scsi/" = "$$dir" ]; then \ continue; \ fi; \ if [ "xen-vbd/" = "$$dir" ] && [ "3.16.6" = "$(OSVERSION)" ]; then \ cd $$dir; \ for file in `ls opensuse`; \ do \ ln -sf opensuse/$$file $$file; \ done; \ cd -; \ continue; \ fi; \ cd $$dir; \ for file in `ls $(OSVERSION)`; \ do \ ln -sf $(OSVERSION)/$$file $$file; \ done; \ cd -; \ done; \ clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEARGS) find ./ -name modules.order | xargs rm -f rm -rf Module.symvers cd $$M find ./ -type l | xargs rm -f 
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/000077500000000000000000000000001314037446600271055ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/asm-generic/000077500000000000000000000000001314037446600312775ustar00rootroot00000000000000pgtable-nopmd.h000066400000000000000000000005611314037446600341240ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/asm-generic#ifndef _PGTABLE_NOPMD_H #define _PGTABLE_NOPMD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopmd.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPMD_H */ pgtable-nopud.h000066400000000000000000000006211314037446600341310ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/asm-generic#ifndef _PGTABLE_NOPUD_H #define _PGTABLE_NOPUD_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) #error "This version of Linux should not need compat pgtable-nopud.h" #endif #define pud_t pgd_t #define pud_offset(d, va) d #define pud_none(pud) 0 #define pud_present(pud) 1 #define pud_bad(pud) 0 #define PTRS_PER_PUD 1 #endif /* _PGTABLE_NOPUD_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/linux/000077500000000000000000000000001314037446600302445ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/linux/io.h000066400000000000000000000002771314037446600310320ustar00rootroot00000000000000#ifndef _LINUX_IO_H #define _LINUX_IO_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat linux/io.h" #endif #include #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/linux/mutex.h000066400000000000000000000015411314037446600315600ustar00rootroot00000000000000/* * Copyright (c) 2006 Cisco Systems. All rights reserved. * * This file is released under the GPLv2. */ /* mutex compatibility for pre-2.6.16 kernels */ #ifndef __LINUX_MUTEX_H #define __LINUX_MUTEX_H #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #error "This version of Linux should not need compat mutex.h" #endif #include #include #define mutex semaphore #define DEFINE_MUTEX(foo) DECLARE_MUTEX(foo) #define mutex_init(foo) init_MUTEX(foo) #define mutex_lock(foo) down(foo) #define mutex_lock_interruptible(foo) down_interruptible(foo) /* this function follows the spin_trylock() convention, so * * it is negated to the down_trylock() return values! 
Be careful */ #define mutex_trylock(foo) !down_trylock(foo) #define mutex_unlock(foo) up(foo) #endif /* __LINUX_MUTEX_H */ scatterlist.h000066400000000000000000000003761314037446600327050ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/linux#ifndef _LINUX_SCATTERLIST_H #define _LINUX_SCATTERLIST_H #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) #error "This version of Linux should not need compat linux/scatterlist.h" #endif #include #endif /* _LINUX_SCATTERLIST_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/xen/000077500000000000000000000000001314037446600276775ustar00rootroot00000000000000platform-compat.h000066400000000000000000000135401314037446600331010ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/compat-include/xen#ifndef COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #define COMPAT_INCLUDE_XEN_PLATFORM_COMPAT_H #include #include #include #if defined(__LINUX_COMPILER_H) && !defined(__always_inline) #define __always_inline inline #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_SPINLOCK) #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif #ifdef _LINUX_INIT_H #ifndef __init #define __init #endif #ifndef __devinit #define __devinit #define __devinitdata #endif #endif /* _LINUX_INIT_H */ #if defined(__LINUX_CACHE_H) && !defined(__read_mostly) #define __read_mostly #endif #if defined(_LINUX_SKBUFF_H) && !defined(NET_IP_ALIGN) #define NET_IP_ALIGN 0 #endif #if defined(_LINUX_SKBUFF_H) && !defined(CHECKSUM_HW) #define CHECKSUM_HW CHECKSUM_PARTIAL #endif #if defined(_LINUX_ERR_H) && !defined(IS_ERR_VALUE) #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) #endif #if defined(_ASM_IA64_PGTABLE_H) && !defined(_PGTABLE_NOPUD_H) #include #endif /* Some kernels have this typedef backported so we cannot reliably * detect based on version number, hence we forcibly #define it. 
*/ #if defined(__LINUX_TYPES_H) || defined(__LINUX_GFP_H) || defined(_LINUX_KERNEL_H) #define gfp_t unsigned #endif #if defined(_LINUX_NOTIFIER_H) && !defined(ATOMIC_NOTIFIER_HEAD) #define ATOMIC_NOTIFIER_HEAD(name) struct notifier_block *name #define atomic_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define atomic_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define atomic_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_NOTIFIER_H) && !defined(BLOCKING_NOTIFIER_HEAD) #define BLOCKING_NOTIFIER_HEAD(name) struct notifier_block *name #define blocking_notifier_chain_register(chain,nb) notifier_chain_register(chain,nb) #define blocking_notifier_chain_unregister(chain,nb) notifier_chain_unregister(chain,nb) #define blocking_notifier_call_chain(chain,val,v) notifier_call_chain(chain,val,v) #endif #if defined(_LINUX_MM_H) && defined set_page_count #define init_page_count(page) set_page_count(page, 1) #endif #if defined(__LINUX_GFP_H) && !defined __GFP_NOMEMALLOC #define __GFP_NOMEMALLOC 0 #endif #if defined(_LINUX_FS_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) #define nonseekable_open(inode, filp) /* Nothing to do */ #endif #if defined(_LINUX_MM_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) unsigned long vmalloc_to_pfn(void *addr); #endif #if defined(__LINUX_COMPLETION_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); #endif #if defined(_LINUX_SCHED_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout); #endif #if defined(_LINUX_SLAB_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) void *kzalloc(size_t size, int flags); #endif #if defined(_LINUX_BLKDEV_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define end_that_request_last(req, uptodate) end_that_request_last(req) #endif #if defined(_LINUX_CAPABILITY_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) #define capable(cap) (1) #endif #if defined(_LINUX_KERNEL_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); #endif #if defined(_LINUX_SYSRQ_H) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) #define handle_sysrq(x,y,z) handle_sysrq(x,y) #endif #if defined(_PAGE_PRESENT) && !defined(_PAGE_NX) #define _PAGE_NX 0 /* * This variable at present is referenced by netfront, but only in code that * is dead when running in hvm guests. To detect potential active uses of it * in the future, don't try to supply a 'valid' value here, so that any * mappings created with it will fault when accessed. */ #define __supported_pte_mask ((maddr_t)0) #endif /* This code duplication is not ideal, but || does not seem to properly * short circuit in a #if condition. 
**/ #if defined(_LINUX_NETDEVICE_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) #if !defined(SLE_VERSION) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #elif SLE_VERSION_CODE < SLE_VERSION(10,1,0) #define netif_tx_lock_bh(dev) spin_lock_bh(&(dev)->xmit_lock) #define netif_tx_unlock_bh(dev) spin_unlock_bh(&(dev)->xmit_lock) #endif #endif #if defined(__LINUX_SEQLOCK_H) && !defined(DEFINE_SEQLOCK) #define DEFINE_SEQLOCK(x) seqlock_t x = SEQLOCK_UNLOCKED #endif /* Bug in RHEL4-U3: rw_lock_t is mistakenly defined in DEFINE_RWLOCK() macro */ #if defined(__LINUX_SPINLOCK_H) && defined(DEFINE_RWLOCK) #define rw_lock_t rwlock_t #endif #if defined(__LINUX_SPINLOCK_H) && !defined(DEFINE_RWLOCK) #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #if defined(_LINUX_INTERRUPT_H) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /** * RHEL4-U5 pulled back this feature into the older kernel * Since it is a typedef, and not a macro - detect this kernel via * RHEL_VERSION */ #if !defined(RHEL_VERSION) || (RHEL_VERSION == 4 && RHEL_UPDATE < 5) #if !defined(RHEL_MAJOR) || (RHEL_MAJOR == 4 && RHEL_MINOR < 5) typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *); #endif #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23) #define setup_xen_features xen_setup_features #endif #ifndef atomic_cmpxchg #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #endif #ifdef sync_test_bit #define synch_change_bit sync_change_bit #define synch_clear_bit sync_clear_bit #define synch_set_bit sync_set_bit #define synch_test_and_change_bit sync_test_and_change_bit #define synch_test_and_clear_bit sync_test_and_clear_bit #define synch_test_and_set_bit sync_test_and_set_bit #define synch_test_bit sync_test_bit #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/config.mk000066400000000000000000000022111314037446600257730ustar00rootroot00000000000000# Hack: we need to use the config which was used to build the kernel, # except that that won't have the right headers etc., so duplicate # some of the mach-xen infrastructure in here. # # (i.e. we need the native config for things like -mregparm, but # a Xen kernel to find the right headers) _XEN_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030205 _XEN_CPPFLAGS += -DCONFIG_XEN_COMPAT=0xffffff _XEN_CPPFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H OSVERSION = $(shell echo $(BUILDKERNEL) | awk -F"." '{print $$1 "." $$2 "." 
$$3}' | awk -F"-" '{print $$1}') ifeq ($(ARCH),ia64) _XEN_CPPFLAGS += -DCONFIG_VMX_GUEST endif ifeq ("3.16.6", "$(OSVERSION)") _XEN_CPPFLAGS += -DOPENSUSE_1302 endif VERSION := $(shell cat /etc/SuSE-release | grep VERSION | awk -F" " '{print $$3}') PATCHLEVEL := $(shell cat /etc/SuSE-release | grep PATCHLEVEL | awk -F" " '{print $$3}') OSTYPE := "SUSE$(VERSION)SP$(PATCHLEVEL)" AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) _XEN_CPPFLAGS += -include $(AUTOCONF) EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/000077500000000000000000000000001314037446600256245ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/000077500000000000000000000000001314037446600264045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/gnttab_dma.h000066400000000000000000000031611314037446600306560ustar00rootroot00000000000000/* * Copyright (c) 2007 Herbert Xu * Copyright (c) 2007 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _ASM_I386_GNTTAB_DMA_H #define _ASM_I386_GNTTAB_DMA_H #include static inline int gnttab_dma_local_pfn(struct page *page) { /* Has it become a local MFN? */ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page)))); } static inline maddr_t gnttab_dma_map_page(struct page *page, unsigned long offset) { unsigned int pgnr = offset >> PAGE_SHIFT; unsigned int order = compound_order(page); BUG_ON(pgnr >> order); __gnttab_dma_map_page(page); return ((maddr_t)pfn_to_mfn(page_to_pfn(page) + pgnr) << PAGE_SHIFT) + (offset & ~PAGE_MASK); } static inline void gnttab_dma_unmap_page(maddr_t maddr) { __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr))); } #endif /* _ASM_I386_GNTTAB_DMA_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/hypercall.h000066400000000000000000000247671314037446600305600ustar00rootroot00000000000000/****************************************************************************** * hypercall.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * 64-bit updates: * Benjamin Liu * Jun Nakajima * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERCALL_H__ #define __HYPERCALL_H__ #ifndef __HYPERVISOR_H__ # error "please don't include this file directly" #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST # include # include #endif #ifdef CONFIG_XEN #define HYPERCALL_ASM_OPERAND "%c" #define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32) #define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #else #define HYPERCALL_ASM_OPERAND "*%" #define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32) #define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name)) #endif #define HYPERCALL_ARG(arg, n) \ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg) #define _hypercall0(type, name) \ ({ \ type __res; \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "1" \ : "=a" (__res) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall1(type, name, arg) \ ({ \ type __res; \ HYPERCALL_ARG(arg, 1); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "2" \ : "=a" (__res), "+r" (__arg1) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall2(type, name, a1, a2) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "3" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall3(type, name, a1, a2, a3) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "4" \ : "=a" (__res), "+r" (__arg1), \ "+r" (__arg2), "+r" (__arg3) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall4(type, name, a1, a2, a3, a4) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "5" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ 
HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call " HYPERCALL_ASM_OPERAND "6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : HYPERCALL_C_OPERAND(name) \ : "memory" ); \ __res; \ }) #define _hypercall(type, op, a1, a2, a3, a4, a5) \ ({ \ type __res; \ HYPERCALL_ARG(a1, 1); \ HYPERCALL_ARG(a2, 2); \ HYPERCALL_ARG(a3, 3); \ HYPERCALL_ARG(a4, 4); \ HYPERCALL_ARG(a5, 5); \ asm volatile ( \ "call *%6" \ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \ : "g" (HYPERCALL_LOCATION(op)) \ : "memory" ); \ __res; \ }) #ifdef CONFIG_X86_32 # include "hypercall_32.h" #else # include "hypercall_64.h" #endif static inline int __must_check HYPERVISOR_set_trap_table( const trap_info_t *table) { return _hypercall1(int, set_trap_table, table); } static inline int __must_check HYPERVISOR_mmu_update( mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmu_update(req, count, success_count, domid); return _hypercall4(int, mmu_update, req, count, success_count, domid); } static inline int __must_check HYPERVISOR_mmuext_op( struct mmuext_op *op, unsigned int count, unsigned int *success_count, domid_t domid) { if (arch_use_lazy_mmu_mode()) return xen_multi_mmuext_op(op, count, success_count, domid); return _hypercall4(int, mmuext_op, op, count, success_count, domid); } static inline int __must_check HYPERVISOR_set_gdt( unsigned long *frame_list, unsigned int entries) { return _hypercall2(int, set_gdt, frame_list, entries); } static inline int __must_check HYPERVISOR_stack_switch( unsigned long ss, unsigned long esp) { return _hypercall2(int, stack_switch, ss, esp); } static inline int HYPERVISOR_fpu_taskswitch( int set) { return _hypercall1(int, fpu_taskswitch, set); } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_sched_op_compat( int cmd, unsigned long arg) { return _hypercall2(int, sched_op_compat, cmd, arg); } #endif static inline int __must_check HYPERVISOR_sched_op( int cmd, void *arg) { return _hypercall2(int, sched_op, cmd, arg); } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static inline int __must_check HYPERVISOR_platform_op( struct xen_platform_op *platform_op) { platform_op->interface_version = XENPF_INTERFACE_VERSION; return _hypercall1(int, platform_op, platform_op); } static inline int __must_check HYPERVISOR_mca( struct xen_mc *mc_op) { mc_op->interface_version = XEN_MCA_INTERFACE_VERSION; return _hypercall1(int, mca, mc_op); } #endif static inline int __must_check HYPERVISOR_set_debugreg( unsigned int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } static inline unsigned long __must_check HYPERVISOR_get_debugreg( unsigned int reg) { return _hypercall1(unsigned long, get_debugreg, reg); } static inline int __must_check HYPERVISOR_memory_op( unsigned int cmd, void *arg) { if (arch_use_lazy_mmu_mode()) xen_multicall_flush(); return _hypercall2(int, memory_op, cmd, arg); } static inline int __must_check HYPERVISOR_multicall( multicall_entry_t *call_list, unsigned int nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } static inline int __must_check HYPERVISOR_event_channel_op( int cmd, void *arg) { int rc = _hypercall2(int, event_channel_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct evtchn_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = 
_hypercall1(int, event_channel_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_xen_version( int cmd, void *arg) { return _hypercall2(int, xen_version, cmd, arg); } static inline int __must_check HYPERVISOR_console_io( int cmd, unsigned int count, char *str) { return _hypercall3(int, console_io, cmd, count, str); } static inline int __must_check HYPERVISOR_physdev_op( int cmd, void *arg) { int rc = _hypercall2(int, physdev_op, cmd, arg); #if CONFIG_XEN_COMPAT <= 0x030002 if (unlikely(rc == -ENOSYS)) { struct physdev_op op; op.cmd = cmd; memcpy(&op.u, arg, sizeof(op.u)); rc = _hypercall1(int, physdev_op_compat, &op); memcpy(arg, &op.u, sizeof(op.u)); } #endif return rc; } static inline int __must_check HYPERVISOR_grant_table_op( unsigned int cmd, void *uop, unsigned int count) { bool fixup = false; int rc; if (arch_use_lazy_mmu_mode()) xen_multicall_flush(); #ifdef GNTTABOP_map_grant_ref if (cmd == GNTTABOP_map_grant_ref) #endif fixup = gnttab_pre_map_adjust(cmd, uop, count); rc = _hypercall3(int, grant_table_op, cmd, uop, count); if (rc == 0 && fixup) rc = gnttab_post_map_adjust(uop, count); return rc; } static inline int __must_check HYPERVISOR_vm_assist( unsigned int cmd, unsigned int type) { return _hypercall2(int, vm_assist, cmd, type); } static inline int __must_check HYPERVISOR_vcpu_op( int cmd, unsigned int vcpuid, void *extra_args) { return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); } static inline int __must_check HYPERVISOR_suspend( unsigned long srec) { struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_suspend }; int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown, &sched_shutdown, srec); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown, SHUTDOWN_suspend, srec); #endif return rc; } #if CONFIG_XEN_COMPAT <= 0x030002 static inline int HYPERVISOR_nmi_op( unsigned long op, void *arg) { return _hypercall2(int, nmi_op, op, arg); } #endif #ifndef CONFIG_XEN static inline unsigned long __must_check HYPERVISOR_hvm_op( int op, void *arg) { return _hypercall2(unsigned long, hvm_op, op, arg); } #endif static inline int __must_check HYPERVISOR_callback_op( int cmd, const void *arg) { return _hypercall2(int, callback_op, cmd, arg); } static inline int __must_check HYPERVISOR_xenoprof_op( int op, void *arg) { return _hypercall2(int, xenoprof_op, op, arg); } static inline int __must_check HYPERVISOR_kexec_op( unsigned long op, void *args) { return _hypercall2(int, kexec_op, op, args); } struct tmem_op; static inline int __must_check HYPERVISOR_tmem_op( struct tmem_op *op) { return _hypercall1(int, tmem_op, (void *)op); } #endif /* __HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/hypercall_32.h000066400000000000000000000031401314037446600310420ustar00rootroot00000000000000#define HYPERCALL_arg1 "ebx" #define HYPERCALL_arg2 "ecx" #define HYPERCALL_arg3 "edx" #define HYPERCALL_arg4 "esi" #define HYPERCALL_arg5 "edi" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_selector, unsigned long event_address, unsigned long failsafe_selector, unsigned long failsafe_address) { return _hypercall4(int, set_callbacks, event_selector, event_address, failsafe_selector, failsafe_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall2(long, set_timer_op, (unsigned long)timeout, (unsigned 
long)(timeout>>32)); } static inline int __must_check HYPERVISOR_update_descriptor( u64 ma, u64 desc) { return _hypercall4(int, update_descriptor, (unsigned long)ma, (unsigned long)(ma>>32), (unsigned long)desc, (unsigned long)(desc>>32)); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { unsigned long pte_hi = 0; if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall4(int, update_va_mapping, va, new_val.pte_low, pte_hi, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { unsigned long pte_hi = 0; #ifdef CONFIG_X86_PAE pte_hi = new_val.pte_high; #endif return _hypercall5(int, update_va_mapping_otherdomain, va, new_val.pte_low, pte_hi, flags, domid); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/hypercall_64.h000066400000000000000000000025661314037446600310620ustar00rootroot00000000000000#define HYPERCALL_arg1 "rdi" #define HYPERCALL_arg2 "rsi" #define HYPERCALL_arg3 "rdx" #define HYPERCALL_arg4 "r10" #define HYPERCALL_arg5 "r8" #if CONFIG_XEN_COMPAT <= 0x030002 static inline int __must_check HYPERVISOR_set_callbacks( unsigned long event_address, unsigned long failsafe_address, unsigned long syscall_address) { return _hypercall3(int, set_callbacks, event_address, failsafe_address, syscall_address); } #endif static inline long __must_check HYPERVISOR_set_timer_op( u64 timeout) { return _hypercall1(long, set_timer_op, timeout); } static inline int __must_check HYPERVISOR_update_descriptor( unsigned long ma, unsigned long word) { return _hypercall2(int, update_descriptor, ma, word); } static inline int __must_check HYPERVISOR_update_va_mapping( unsigned long va, pte_t new_val, unsigned long flags) { if (arch_use_lazy_mmu_mode()) return xen_multi_update_va_mapping(va, new_val, flags); return _hypercall3(int, update_va_mapping, va, new_val.pte, flags); } static inline int __must_check HYPERVISOR_update_va_mapping_otherdomain( unsigned long va, pte_t new_val, unsigned long flags, domid_t domid) { return _hypercall4(int, update_va_mapping_otherdomain, va, new_val.pte, flags, domid); } static inline int __must_check HYPERVISOR_set_segment_base( int reg, unsigned long value) { return _hypercall2(int, set_segment_base, reg, value); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/hypervisor.h000066400000000000000000000252561314037446600310010ustar00rootroot00000000000000/****************************************************************************** * hypervisor.h * * Linux-specific hypervisor handling. 
* * Copyright (c) 2002-2004, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __HYPERVISOR_H__ #define __HYPERVISOR_H__ #include #include #include #include #include #include #include #include #include extern shared_info_t *HYPERVISOR_shared_info; #if defined(CONFIG_XEN_VCPU_INFO_PLACEMENT) DECLARE_PER_CPU(struct vcpu_info, vcpu_info); # define vcpu_info(cpu) (&per_cpu(vcpu_info, cpu)) # define current_vcpu_info() (&__get_cpu_var(vcpu_info)) void setup_vcpu_info(unsigned int cpu); void adjust_boot_vcpu_info(void); #elif defined(CONFIG_XEN) # define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu)) # ifdef CONFIG_SMP # include # define current_vcpu_info() vcpu_info(smp_processor_id()) # else # define current_vcpu_info() vcpu_info(0) # endif static inline void setup_vcpu_info(unsigned int cpu) {} #endif #ifdef CONFIG_X86_32 extern unsigned long hypervisor_virt_start; #endif /* arch/xen/i386/kernel/setup.c */ extern start_info_t *xen_start_info; #ifdef CONFIG_XEN_PRIVILEGED_GUEST #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) #else #define is_initial_xendomain() 0 #endif #define init_hypervisor(c) ((void)(c)) #define init_hypervisor_platform() init_hypervisor(&boot_cpu_data) DECLARE_PER_CPU(struct vcpu_runstate_info, runstate); #define vcpu_running(cpu) (per_cpu(runstate.state, cpu) == RUNSTATE_running) /* arch/xen/kernel/evtchn.c */ /* Force a proper event-channel callback from Xen. */ void force_evtchn_callback(void); /* arch/xen/kernel/process.c */ void xen_cpu_idle (void); /* arch/xen/i386/kernel/hypervisor.c */ void do_hypervisor_callback(struct pt_regs *regs); /* arch/xen/i386/mm/hypervisor.c */ /* * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already * be MACHINE addresses. 
*/ void xen_pt_switch(pgd_t *); void xen_new_user_pt(pgd_t *); /* x86_64 only */ void xen_load_gs(unsigned int selector); /* x86_64 only */ void xen_tlb_flush(void); void xen_invlpg(unsigned long ptr); void xen_l1_entry_update(pte_t *ptr, pte_t val); void xen_l2_entry_update(pmd_t *ptr, pmd_t val); void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */ void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */ void xen_pgd_pin(pgd_t *); void xen_pgd_unpin(pgd_t *); void xen_init_pgd_pin(void); #ifdef CONFIG_PM_SLEEP void setup_pfn_to_mfn_frame_list(void *(*)(unsigned long, unsigned long, unsigned long)); #endif void xen_set_ldt(const void *ptr, unsigned int ents); #ifdef CONFIG_SMP #include void xen_tlb_flush_all(void); void xen_invlpg_all(unsigned long ptr); void xen_tlb_flush_mask(const cpumask_t *mask); void xen_invlpg_mask(const cpumask_t *mask, unsigned long ptr); #else #define xen_tlb_flush_all xen_tlb_flush #define xen_invlpg_all xen_invlpg #endif /* Returns zero on success else negative errno. */ int xen_create_contiguous_region( unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region( unsigned long vstart, unsigned int order); int early_create_contiguous_region(unsigned long pfn, unsigned int order, unsigned int address_bits); struct page; int xen_limit_pages_to_max_mfn( struct page *pages, unsigned int order, unsigned int address_bits); bool __cold hypervisor_oom(void); /* Turn jiffies into Xen system time. */ u64 jiffies_to_st(unsigned long jiffies); #ifdef CONFIG_XEN_SCRUB_PAGES void xen_scrub_pages(void *, unsigned int); #else #define xen_scrub_pages(_p,_n) ((void)0) #endif #if defined(CONFIG_XEN) && !defined(MODULE) DECLARE_PER_CPU(bool, xen_lazy_mmu); void xen_multicall_flush(void); int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t, unsigned long flags); int __must_check xen_multi_mmu_update(mmu_update_t *, unsigned int count, unsigned int *success_count, domid_t); int __must_check xen_multi_mmuext_op(struct mmuext_op *, unsigned int count, unsigned int *success_count, domid_t); #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE static inline void arch_enter_lazy_mmu_mode(void) { this_cpu_write_1(xen_lazy_mmu, true); } static inline void arch_leave_lazy_mmu_mode(void) { this_cpu_write_1(xen_lazy_mmu, false); xen_multicall_flush(); } #define arch_use_lazy_mmu_mode() unlikely(this_cpu_read_1(xen_lazy_mmu)) #else /* !CONFIG_XEN || MODULE */ static inline void xen_multicall_flush(void) {} #define arch_use_lazy_mmu_mode() false #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; }) #define xen_multi_mmuext_op(...) ({ BUG(); -ENOSYS; }) #endif /* CONFIG_XEN && !MODULE */ #ifdef CONFIG_XEN struct gnttab_map_grant_ref; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *, unsigned int count); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *, unsigned int); #else static inline int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *m, unsigned int count) { BUG(); return -ENOSYS; } #endif #else /* !CONFIG_XEN */ #define gnttab_pre_map_adjust(...) false #define gnttab_post_map_adjust(...) 
({ BUG(); -ENOSYS; }) #endif /* CONFIG_XEN */ #if defined(CONFIG_X86_64) #define MULTI_UVMFLAGS_INDEX 2 #define MULTI_UVMDOMID_INDEX 3 #else #define MULTI_UVMFLAGS_INDEX 3 #define MULTI_UVMDOMID_INDEX 4 #endif #ifdef CONFIG_XEN #define is_running_on_xen() 1 extern char hypercall_page[PAGE_SIZE]; #define in_hypercall(regs) (!user_mode_vm(regs) && \ (regs)->ip >= (unsigned long)hypercall_page && \ (regs)->ip < (unsigned long)hypercall_page + PAGE_SIZE) #else extern char *hypercall_stubs; #define is_running_on_xen() (!!hypercall_stubs) #endif #include static inline int HYPERVISOR_yield( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int HYPERVISOR_block( void) { int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0); #endif return rc; } static inline void __noreturn HYPERVISOR_shutdown( unsigned int reason) { struct sched_shutdown sched_shutdown = { .reason = reason }; VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown)); #if CONFIG_XEN_COMPAT <= 0x030002 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason)); #endif /* Don't recurse needlessly. */ BUG_ON(reason != SHUTDOWN_crash); for(;;); } static inline int __must_check HYPERVISOR_poll( evtchn_port_t *ports, unsigned int nr_ports, u64 timeout) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports, .timeout = jiffies_to_st(timeout) }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } static inline int __must_check HYPERVISOR_poll_no_timeout( evtchn_port_t *ports, unsigned int nr_ports) { int rc; struct sched_poll sched_poll = { .nr_ports = nr_ports }; set_xen_guest_handle(sched_poll.ports, ports); rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll); #if CONFIG_XEN_COMPAT <= 0x030002 if (rc == -ENOSYS) rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0); #endif return rc; } #ifdef CONFIG_XEN static inline void MULTI_update_va_mapping( multicall_entry_t *mcl, unsigned long va, pte_t new_val, unsigned long flags) { mcl->op = __HYPERVISOR_update_va_mapping; mcl->args[0] = va; #if defined(CONFIG_X86_64) mcl->args[1] = new_val.pte; #elif defined(CONFIG_X86_PAE) mcl->args[1] = new_val.pte_low; mcl->args[2] = new_val.pte_high; #else mcl->args[1] = new_val.pte_low; mcl->args[2] = 0; #endif mcl->args[MULTI_UVMFLAGS_INDEX] = flags; } static inline void MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req, unsigned int count, unsigned int *success_count, domid_t domid) { mcl->op = __HYPERVISOR_mmu_update; mcl->args[0] = (unsigned long)req; mcl->args[1] = count; mcl->args[2] = (unsigned long)success_count; mcl->args[3] = domid; } static inline void MULTI_memory_op(multicall_entry_t *mcl, unsigned int cmd, void *arg) { mcl->op = __HYPERVISOR_memory_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)arg; } static inline void MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd, void *uop, unsigned int count) { mcl->op = __HYPERVISOR_grant_table_op; mcl->args[0] = cmd; mcl->args[1] = (unsigned long)uop; mcl->args[2] = count; } #else /* !defined(CONFIG_XEN) */ /* Multicalls not supported for HVM guests. */ static inline void MULTI_bug(multicall_entry_t *mcl, ...) 
{ BUG_ON(mcl); } #define MULTI_update_va_mapping MULTI_bug #define MULTI_mmu_update MULTI_bug #define MULTI_memory_op MULTI_bug #define MULTI_grant_table_op MULTI_bug #endif #define uvm_multi(cpumask) ((unsigned long)cpumask_bits(cpumask) | UVMF_MULTI) #ifdef LINUX /* drivers/staging/ use Windows-style types, including VOID */ #undef VOID #endif #endif /* __HYPERVISOR_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/maddr.h000066400000000000000000000110261314037446600276440ustar00rootroot00000000000000#ifndef _X86_MADDR_H #define _X86_MADDR_H #include #include #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY (~0UL) #define FOREIGN_FRAME_BIT (1UL << (BITS_PER_LONG - 1)) #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ #ifdef CONFIG_X86_PAE typedef unsigned long long paddr_t; typedef unsigned long long maddr_t; #else typedef unsigned long paddr_t; typedef unsigned long maddr_t; #endif #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; extern unsigned long max_mapnr; #undef machine_to_phys_mapping extern const unsigned long *machine_to_phys_mapping; extern unsigned long machine_to_phys_nr; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return 1; if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return mfn; if (unlikely(mfn >= machine_to_phys_nr)) return max_mapnr; /* The array access can fail (e.g., device space beyond end of RAM). */ asm ( "1: "_ASM_MOV" %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3: "_ASM_MOV" %2,%0\n" " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b,3b) : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 
*/ static inline unsigned long mfn_to_local_pfn(phys_addr_t mfn) { unsigned long pfn = mfn_to_pfn(mfn); if (likely(pfn < max_mapnr) && likely(!xen_feature(XENFEAT_auto_translated_physmap)) && unlikely(phys_to_machine_mapping[pfn] != mfn)) return max_mapnr; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (likely(max_mapnr)) BUG_ON(pfn >= max_mapnr); if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } #ifdef CONFIG_X86_32 # include "maddr_32.h" #else # include "maddr_64.h" #endif #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) 1 #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) phys_to_machine(__pa(v)) #define virt_to_mfn(v) pfn_to_mfn(__pa(v) >> PAGE_SHIFT) #define mfn_to_virt(m) __va(mfn_to_pfn(m) << PAGE_SHIFT) #endif /* _X86_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/maddr_32.h000066400000000000000000000017701314037446600301550ustar00rootroot00000000000000#ifndef _I386_MADDR_H #define _I386_MADDR_H #ifdef CONFIG_X86_PAE static inline paddr_t pte_phys_to_machine(paddr_t phys) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to pfn_to_mfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. */ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { /* * In PAE mode, the NX bit needs to be dealt with in the value * passed to mfn_to_pfn(). On x86_64, we need to mask it off, * but for i386 the conversion to ulong for the argument will * clip it off. 
*/ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #else #define pte_phys_to_machine phys_to_machine #define pte_machine_to_phys machine_to_phys #endif #endif /* _I386_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/asm/maddr_64.h000066400000000000000000000010231314037446600301510ustar00rootroot00000000000000#ifndef _X86_64_MADDR_H #define _X86_64_MADDR_H static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #endif /* _X86_64_MADDR_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/000077500000000000000000000000001314037446600264165ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/acpi.h000066400000000000000000000066441314037446600275150ustar00rootroot00000000000000/****************************************************************************** * acpi.h * acpi file for domain 0 kernel * * Copyright (c) 2011 Konrad Rzeszutek Wilk * Copyright (c) 2011 Yu Ke * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XEN_ACPI_H #define _XEN_ACPI_H #include #ifdef CONFIG_XEN_DOM0 #include #include #include #define ACPI_MEMORY_DEVICE_CLASS "memory" #define ACPI_MEMORY_DEVICE_HID "PNP0C80" #define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" int xen_stub_memory_device_init(void); void xen_stub_memory_device_exit(void); #define ACPI_PROCESSOR_CLASS "processor" #define ACPI_PROCESSOR_DEVICE_HID "ACPI0007" #define ACPI_PROCESSOR_DEVICE_NAME "Processor" int xen_stub_processor_init(void); void xen_stub_processor_exit(void); void xen_pcpu_hotplug_sync(void); int xen_pcpu_id(uint32_t acpi_id); static inline int xen_acpi_get_pxm(acpi_handle h) { unsigned long long pxm; acpi_status status; acpi_handle handle; acpi_handle phandle = h; do { handle = phandle; status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); if (ACPI_SUCCESS(status)) return pxm; status = acpi_get_parent(handle, &phandle); } while (ACPI_SUCCESS(status)); return -ENXIO; } int xen_acpi_notify_hypervisor_sleep(u8 sleep_state, u32 pm1a_cnt, u32 pm1b_cnd); int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); static inline int xen_acpi_suspend_lowlevel(void) { /* * Xen will save and restore CPU context, so * we can skip that and just go straight to * the suspend. */ acpi_enter_sleep_state(ACPI_STATE_S3); return 0; } static inline void xen_acpi_sleep_register(void) { if (xen_initial_domain()) { acpi_os_set_prepare_sleep( &xen_acpi_notify_hypervisor_sleep); acpi_os_set_prepare_extended_sleep( &xen_acpi_notify_hypervisor_extended_sleep); acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel; } } #else static inline void xen_acpi_sleep_register(void) { } #endif #endif /* _XEN_ACPI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/balloon.h000066400000000000000000000073761314037446600302320ustar00rootroot00000000000000/****************************************************************************** * balloon.h * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_BALLOON_H__ #define __XEN_BALLOON_H__ #include #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Inform the balloon driver that it should allow some slop for device-driver * memory activities. */ void balloon_update_driver_allowance(long delta); /* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */ struct page **alloc_empty_pages_and_pagevec(int nr_pages); void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages); /* Free an empty page range (not allocated through alloc_empty_pages_and_pagevec), adding to the balloon. */ void free_empty_pages(struct page **pagevec, int nr_pages); void balloon_release_driver_page(struct page *page); /* * Prevent the balloon driver from changing the memory reservation during * a driver critical region. */ extern spinlock_t balloon_lock; #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags) #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags) #else /* CONFIG_PARAVIRT_XEN */ #define RETRY_UNLIMITED 0 struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; unsigned long schedule_delay; unsigned long max_schedule_delay; unsigned long retry_count; unsigned long max_retry_count; #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG unsigned long hotplug_pages; unsigned long balloon_hotplug; #endif }; extern struct balloon_stats balloon_stats; void balloon_set_new_target(unsigned long target); int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem); void free_xenballooned_pages(int nr_pages, struct page **pages); struct page *get_balloon_scratch_page(void); void put_balloon_scratch_page(void); #endif /* CONFIG_PARAVIRT_XEN */ struct device; #ifdef CONFIG_XEN_SELFBALLOONING extern int register_xen_selfballooning(struct device *dev); #else static inline int register_xen_selfballooning(struct device *dev) { return -ENOSYS; } #endif #endif /* __XEN_BALLOON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/barrier.h000066400000000000000000000002661314037446600302210ustar00rootroot00000000000000#ifndef __XEN_BARRIER_H__ #define __XEN_BARRIER_H__ #include #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif /* __XEN_BARRIER_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/blkif.h000066400000000000000000000212451314037446600276620ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_BLKIF_H__ #define __XEN_BLKIF_H__ #include #include #include #define BLKIF_SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE / sizeof(struct blkif_request_segment)) #define BLKIF_INDIRECT_PAGES(segs) \ (((segs) + BLKIF_SEGS_PER_INDIRECT_FRAME - 1) \ / BLKIF_SEGS_PER_INDIRECT_FRAME) /* Not a real protocol. Used to generate ring structs which contain * the elements common to all protocols only. This way we get a * compiler-checkable way to use common struct elements, so we can * avoid using switch(protocol) in a number of places. */ struct blkif_common_request { char dummy; }; struct blkif_common_response { char dummy; }; union __attribute__((transparent_union)) blkif_union { struct blkif_request *generic; struct blkif_request_discard *discard; struct blkif_request_indirect *indirect; }; /* i386 protocol version */ #pragma pack(push, 4) struct blkif_x86_32_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_32_discard { uint8_t operation; /* BLKIF_OP_DISCARD */ uint8_t flag; /* BLKIF_DISCARD_* */ blkif_vdev_t handle; /* same as for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk */ uint64_t nr_sectors; /* number of contiguous sectors */ }; struct blkif_x86_32_indirect { uint8_t operation; /* BLKIF_OP_INDIRECT */ uint8_t indirect_op; /* BLKIF_OP_{READ/WRITE} */ uint16_t nr_segments; /* number of segments */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ blkif_vdev_t handle; /* same as for read/write requests */ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; uint64_t pad; /* make it 64 byte aligned */ }; union blkif_x86_32_union { struct blkif_x86_32_request generic; struct blkif_x86_32_discard discard; struct blkif_x86_32_indirect indirect; }; struct blkif_x86_32_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; #pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request { uint8_t operation; /* BLKIF_OP_??? 
*/ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; struct blkif_x86_64_discard { uint8_t operation; /* BLKIF_OP_DISCARD */ uint8_t flag; /* BLKIF_DISCARD_* */ blkif_vdev_t handle; /* same as for read/write requests */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk */ uint64_t nr_sectors; /* number of contiguous sectors */ }; struct blkif_x86_64_indirect { uint8_t operation; /* BLKIF_OP_INDIRECT */ uint8_t indirect_op; /* BLKIF_OP_{READ/WRITE} */ uint16_t nr_segments; /* number of segments */ uint64_t __attribute__((__aligned__(8))) id; blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ blkif_vdev_t handle; /* same as for read/write requests */ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; }; union blkif_x86_64_union { struct blkif_x86_64_request generic; struct blkif_x86_64_discard discard; struct blkif_x86_64_indirect indirect; }; struct blkif_x86_64_response { uint64_t __attribute__((__aligned__(8))) id; uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; #define blkif_native_sring blkif_sring DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response); DEFINE_RING_TYPES(blkif_x86_32, union blkif_x86_32_union, struct blkif_x86_32_response); DEFINE_RING_TYPES(blkif_x86_64, union blkif_x86_64_union, struct blkif_x86_64_response); union blkif_back_rings { blkif_back_ring_t native; blkif_common_back_ring_t common; blkif_x86_32_back_ring_t x86_32; blkif_x86_64_back_ring_t x86_64; }; typedef union blkif_back_rings blkif_back_rings_t; enum blkif_protocol { BLKIF_PROTOCOL_NATIVE = 1, BLKIF_PROTOCOL_X86_32 = 2, BLKIF_PROTOCOL_X86_64 = 3, }; static void inline blkif_get_x86_32_req(union blkif_union dst, const union blkif_x86_32_union *src) { #ifdef __i386__ memcpy(dst.generic, src, sizeof(*dst.generic)); #else unsigned int i, n; dst.generic->operation = src->generic.operation; dst.generic->nr_segments = src->generic.nr_segments; dst.generic->handle = src->generic.handle; dst.generic->id = src->generic.id; dst.generic->sector_number = src->generic.sector_number; barrier(); switch (__builtin_expect(dst.generic->operation, BLKIF_OP_READ)) { default: n = min_t(unsigned int, dst.generic->nr_segments, BLKIF_MAX_SEGMENTS_PER_REQUEST); for (i = 0; i < n; i++) dst.generic->seg[i] = src->generic.seg[i]; break; case BLKIF_OP_DISCARD: /* All fields up to sector_number got copied above already. */ dst.discard->nr_sectors = src->discard.nr_sectors; break; case BLKIF_OP_INDIRECT: /* All fields up to sector_number got copied above already. 
*/ dst.indirect->handle = src->indirect.handle; n = min_t(unsigned int, BLKIF_INDIRECT_PAGES(dst.indirect->nr_segments), BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); for (i = 0; i < n; ++i) dst.indirect->indirect_grefs[i] = src->indirect.indirect_grefs[i]; break; } #endif } static void inline blkif_get_x86_64_req(union blkif_union dst, const union blkif_x86_64_union *src) { #ifdef __x86_64__ memcpy(dst.generic, src, sizeof(*dst.generic)); #else unsigned int i, n; dst.generic->operation = src->generic.operation; dst.generic->nr_segments = src->generic.nr_segments; dst.generic->handle = src->generic.handle; dst.generic->id = src->generic.id; dst.generic->sector_number = src->generic.sector_number; barrier(); switch (__builtin_expect(dst.generic->operation, BLKIF_OP_READ)) { default: n = min_t(unsigned int, dst.generic->nr_segments, BLKIF_MAX_SEGMENTS_PER_REQUEST); for (i = 0; i < n; i++) dst.generic->seg[i] = src->generic.seg[i]; break; case BLKIF_OP_DISCARD: /* All fields up to sector_number got copied above already. */ dst.discard->nr_sectors = src->discard.nr_sectors; break; case BLKIF_OP_INDIRECT: /* All fields up to sector_number got copied above already. */ dst.indirect->handle = src->indirect.handle; n = min_t(unsigned int, BLKIF_INDIRECT_PAGES(dst.indirect->nr_segments), BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); for (i = 0; i < n; ++i) dst.indirect->indirect_grefs[i] = src->indirect.indirect_grefs[i]; break; } #endif } #endif /* __XEN_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/clock.h000066400000000000000000000011311314037446600276560ustar00rootroot00000000000000#ifndef __XEN_CPU_CLOCK_H__ #define __XEN_CPU_CLOCK_H__ void setup_runstate_area(unsigned int cpu); extern struct pvclock_vsyscall_time_info *pvclock_vsyscall_time; void setup_vsyscall_time_area(unsigned int cpu); unsigned long long xen_local_clock(void); void xen_check_wallclock_update(void); #ifdef CONFIG_GENERIC_CLOCKEVENTS void xen_clockevents_init(void); void xen_setup_cpu_clockevents(void); void xen_clockevents_resume(bool late); #else static inline void xen_setup_cpu_clockevents(void) {} static inline void xen_clockevents_resume(bool late) {} #endif #endif /* __XEN_CPU_CLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/compat_ioctl.h000066400000000000000000000000001314037446600312320ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/cpu_hotplug.h000066400000000000000000000014361314037446600311240ustar00rootroot00000000000000#ifndef __XEN_CPU_HOTPLUG_H__ #define __XEN_CPU_HOTPLUG_H__ #include #include #if defined(CONFIG_X86) && defined(CONFIG_SMP) extern cpumask_var_t vcpu_initialized_mask; #endif #if defined(CONFIG_HOTPLUG_CPU) int cpu_up_check(unsigned int cpu); void init_xenbus_allowed_cpumask(void); int smp_suspend(void); void smp_resume(void); #else /* !defined(CONFIG_HOTPLUG_CPU) */ #define cpu_up_check(cpu) (0) #define init_xenbus_allowed_cpumask() ((void)0) static inline int smp_suspend(void) { if (num_online_cpus() > 1) { pr_warning("Can't suspend SMP guests without" " CONFIG_HOTPLUG_CPU\n"); return -EOPNOTSUPP; } return 0; } static inline void smp_resume(void) { } #endif /* !defined(CONFIG_HOTPLUG_CPU) */ #endif /* __XEN_CPU_HOTPLUG_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/driver_util.h000066400000000000000000000006071314037446600311220ustar00rootroot00000000000000#ifndef __XEN_DRIVER_UTIL_H__ #define __XEN_DRIVER_UTIL_H__ 
#include #include extern struct class *get_xen_class(void); extern struct device *xen_class_device_create(struct device_type *, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) __printf(5, 6); #endif /* __XEN_DRIVER_UTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/events.h000066400000000000000000000073351314037446600301030ustar00rootroot00000000000000#ifndef _XEN_EVENTS_H #define _XEN_EVENTS_H #include #include #include #include int bind_evtchn_to_irq(unsigned int evtchn); int bind_evtchn_to_irqhandler(unsigned int evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irq(unsigned int virq, unsigned int cpu); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (even for bindings * made with bind_evtchn_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); /* * Allow extra references to event channels exposed to userspace by evtchn */ int evtchn_make_refcounted(unsigned int evtchn); int evtchn_get(unsigned int evtchn); void evtchn_put(unsigned int evtchn); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); int resend_irq_on_evtchn(unsigned int irq); void rebind_evtchn_irq(int evtchn, int irq); static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } void notify_remote_via_irq(int irq); void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); void xen_set_irq_pending(int irq); bool xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); /* Poll waiting for an irq to become pending with a timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout); /* Determine the IRQ which is bound to an event channel */ unsigned irq_from_evtchn(unsigned int evtchn); /* Xen HVM evtchn vector callback */ void xen_hvm_callback_vector(void); #ifdef CONFIG_TRACING #define trace_xen_hvm_callback_vector xen_hvm_callback_vector #endif extern int xen_have_vector_callback; int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); /* Bind a pirq for a physical interrupt to an irq. */ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name); #ifdef CONFIG_PCI_MSI /* Allocate a pirq for a MSI style physical interrupt. */ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); /* Bind an PSI pirq to an irq. */ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, const char *name, domid_t domid); #endif /* De-allocates the above mentioned physical interrupt. 
*/ int xen_destroy_irq(int irq); /* Return irq from pirq */ int xen_irq_from_pirq(unsigned pirq); /* Return the pirq allocated to the irq. */ int xen_pirq_from_irq(unsigned irq); /* Return the irq allocated to the gsi */ int xen_irq_from_gsi(unsigned gsi); /* Determine whether to ignore this IRQ if it is passed to a guest. */ int xen_test_irq_shared(int irq); /* initialize Xen IRQ subsystem */ void xen_init_IRQ(void); #endif /* _XEN_EVENTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/evtchn.h000066400000000000000000000162241314037446600300630ustar00rootroot00000000000000#ifdef CONFIG_PARAVIRT_XEN #include #else /****************************************************************************** * evtchn.h * * Communication via Xen event channels. * Also definitions for the device that demuxes notifications to userspace. * * Copyright (c) 2004-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_EVTCHN_H__ #define __ASM_EVTCHN_H__ #include #include #include #include #include #include /* * LOW-LEVEL DEFINITIONS */ #ifdef CONFIG_XEN struct irq_cfg { u32 info; union { int bindcount; /* for dynamic IRQs */ #ifdef CONFIG_X86_IO_APIC u8 vector; /* for physical IRQs */ #endif }; }; struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node); static inline int evtchn_make_refcounted(unsigned int evtchn) { return 0; } #endif /* * Dynamically bind an event source to an IRQ-like callback handler. * On some platforms this may not be implemented via the Linux IRQ subsystem. * The IRQ argument passed to the callback handler is the same as returned * from the bind call. It may not correspond to a Linux IRQ number. * Returns IRQ or negative errno. 
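/*
 * [Editorial example -- not part of the original UVP-Tools sources.]
 * A minimal sketch of how a driver consumes the bind/unbind interface
 * declared in events.h/evtchn.h above. The handler, the choice of
 * VIRQ_DEBUG and the device name are assumptions made for illustration.
 */
#if 0	/* illustration only, never built */
static irqreturn_t example_virq_handler(int irq, void *dev_id)
{
	/* 'irq' is the value that was returned by the bind call. */
	return IRQ_HANDLED;
}

static int example_bind_debug_virq(void)
{
	int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0 /* cpu */,
					  example_virq_handler, 0,
					  "example-virq", NULL);
	if (irq < 0)
		return irq;	/* bind failed, no IRQ allocated */

	/* ... use the event source ... */

	/* Tear down; this also closes the underlying event channel. */
	unbind_from_irqhandler(irq, NULL);
	return 0;
}
#endif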
*/ int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_interdomain_evtchn_to_irqhandler( unsigned int remote_domain, unsigned int remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler( unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) int bind_virq_to_irqaction( unsigned int virq, unsigned int cpu, struct irqaction *action); #else #define bind_virq_to_irqaction(virq, cpu, action) \ bind_virq_to_irqhandler(virq, cpu, (action)->handler, \ (action)->flags | IRQF_NOBALANCING, \ (action)->name, action) #endif #if defined(CONFIG_SMP) && !defined(MODULE) #ifndef CONFIG_X86 int bind_ipi_to_irqhandler( unsigned int ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); #else int bind_ipi_to_irqaction( unsigned int cpu, struct irqaction *action); DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending); #endif #endif /* * Common unbind function for all event sources. Takes IRQ to unbind from. * Automatically closes the underlying event channel (except for bindings * made with bind_caller_port_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); #if defined(CONFIG_SMP) && defined(CONFIG_XEN) && defined(CONFIG_X86) /* Specialized unbind function for per-CPU IRQs. */ void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu, struct irqaction *); #else #define unbind_from_per_cpu_irq(irq, cpu, action) \ unbind_from_irqhandler(irq, action) #endif #ifndef CONFIG_XEN void irq_resume(void); #endif /* Entry point for notifications into Linux subsystems. */ asmlinkage #ifdef CONFIG_PREEMPT void #else bool #endif evtchn_do_upcall(struct pt_regs *regs); /* Mark a PIRQ as unavailable for dynamic allocation. */ void evtchn_register_pirq(int irq); /* Map a Xen-supplied PIRQ to a dynamically allocated one. */ int evtchn_map_pirq(int irq, unsigned int xen_pirq, unsigned int nr); /* Look up a Xen-supplied PIRQ for a dynamically allocated one. 
*/ int evtchn_get_xen_pirq(int irq); void mask_evtchn(int port); void disable_all_local_evtchn(void); void unmask_evtchn(int port); unsigned int irq_from_evtchn(unsigned int port); static inline int test_and_set_evtchn_mask(int port) { shared_info_t *s = HYPERVISOR_shared_info; return sync_test_and_set_bit(port, s->evtchn_mask); } static inline void clear_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; sync_clear_bit(port, s->evtchn_pending); } static inline void set_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; sync_set_bit(port, s->evtchn_pending); } static inline int test_evtchn(int port) { shared_info_t *s = HYPERVISOR_shared_info; return sync_test_bit(port, s->evtchn_pending); } static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send)); } static inline void multi_notify_remote_via_evtchn(multicall_entry_t *mcl, int port) { struct evtchn_send *send = (void *)(mcl->args + 2); BUILD_BUG_ON(sizeof(*send) > sizeof(mcl->args) - 2 * sizeof(*mcl->args)); send->port = port; mcl->op = __HYPERVISOR_event_channel_op; mcl->args[0] = EVTCHNOP_send; mcl->args[1] = (unsigned long)send; } static inline int close_evtchn(int port) { struct evtchn_close close = { .port = port }; return HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); } /* Test an irq's pending state. */ int xen_test_irq_pending(int irq); /* * Use these to access the event channel underlying the IRQ handle returned * by bind_*_to_irqhandler(). */ void notify_remote_via_irq(int irq); int multi_notify_remote_via_irq(multicall_entry_t *, int irq); int irq_to_evtchn_port(int irq); #if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86) void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu); void clear_ipi_evtchn(void); #endif DECLARE_PER_CPU(bool, privcmd_hcall); #if defined(CONFIG_XEN_SPINLOCK_ACQUIRE_NESTING) \ && CONFIG_XEN_SPINLOCK_ACQUIRE_NESTING void xen_spin_irq_enter(void); void xen_spin_irq_exit(void); #else static inline void xen_spin_irq_enter(void) {} static inline void xen_spin_irq_exit(void) {} #endif #endif /* __ASM_EVTCHN_H__ */ #endif /* CONFIG_PARAVIRT_XEN */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/features.h000066400000000000000000000016471314037446600304150ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_FEATURES_H__ #define __XEN_FEATURES_H__ #include #include void xen_setup_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; static inline int xen_feature(int flag) { return xen_features[flag]; } /* * unmodified_drivers/linux-2.6/platform-pci/platform-pci.c from xen-kmp has to * include a new header to get the unplug defines from xen/xen_pvonhvm.h. To * allow compiliation of xen-kmp with older kernel-source a guard has to be * provided to skip inclusion of the new header. An easy place was * xen/features.h, but any header included by platform-pci.c would do. 
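/*
 * [Editorial example -- not part of the original UVP-Tools sources.]
 * Sketch of how the feature bitmap declared in features.h is normally
 * consulted. The caller and the specific flag tested
 * (XENFEAT_auto_translated_physmap, also referenced by the grant-table
 * helpers later in this tree) are illustration only.
 */
#if 0	/* illustration only, never built */
static void example_check_features(void)
{
	/* Fills xen_features[] via XENVER_get_features hypercalls. */
	xen_setup_features();

	if (xen_feature(XENFEAT_auto_translated_physmap))
		pr_info("running with auto-translated physmap\n");
}
#endif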
*/ #ifndef CONFIG_XEN #define HAVE_XEN_PVONHVM_UNPLUG 1 #endif #endif /* __XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/firmware.h000066400000000000000000000003741314037446600304070ustar00rootroot00000000000000#ifndef __XEN_FIRMWARE_H__ #define __XEN_FIRMWARE_H__ #if IS_ENABLED(CONFIG_EDD) void copy_edd(void); #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST void copy_edid(void); #else static inline void copy_edid(void) {} #endif #endif /* __XEN_FIRMWARE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/gntalloc.h000066400000000000000000000050771314037446600304030ustar00rootroot00000000000000/****************************************************************************** * gntalloc.h * * Interface to /dev/xen/gntalloc. * * Author: Daniel De Graaf * * This file is in the public domain. */ #ifndef __LINUX_PUBLIC_GNTALLOC_H__ #define __LINUX_PUBLIC_GNTALLOC_H__ /* * Allocates a new page and creates a new grant reference. */ #define IOCTL_GNTALLOC_ALLOC_GREF \ _IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref)) struct ioctl_gntalloc_alloc_gref { /* IN parameters */ /* The ID of the domain to be given access to the grants. */ uint16_t domid; /* Flags for this mapping */ uint16_t flags; /* Number of pages to map */ uint32_t count; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ uint64_t index; /* The grant references of the newly created grant, one per page */ /* Variable size, depending on count */ uint32_t gref_ids[1]; }; #define GNTALLOC_FLAG_WRITABLE 1 /* * Deallocates the grant reference, allowing the associated page to be freed if * no other domains are using it. */ #define IOCTL_GNTALLOC_DEALLOC_GREF \ _IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref)) struct ioctl_gntalloc_dealloc_gref { /* IN parameters */ /* The offset returned in the map operation */ uint64_t index; /* Number of references to unmap */ uint32_t count; }; /* * Sets up an unmap notification within the page, so that the other side can do * cleanup if this side crashes. Required to implement cross-domain robust * mutexes or close notification on communication channels. * * Each mapped page only supports one notification; multiple calls referring to * the same page overwrite the previous notification. You must clear the * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it * to occur. */ #define IOCTL_GNTALLOC_SET_UNMAP_NOTIFY \ _IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntalloc_unmap_notify)) struct ioctl_gntalloc_unmap_notify { /* IN parameters */ /* Offset in the file descriptor for a byte within the page (same as * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to * be cleared. Otherwise, it can be any byte in the page whose * notification we are adjusting. */ uint64_t index; /* Action(s) to take on unmap */ uint32_t action; /* Event channel to notify */ uint32_t event_channel_port; }; /* Clear (set to zero) the byte specified by index */ #define UNMAP_NOTIFY_CLEAR_BYTE 0x1 /* Send an interrupt on the indicated event channel */ #define UNMAP_NOTIFY_SEND_EVENT 0x2 #endif /* __LINUX_PUBLIC_GNTALLOC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/gnttab.h000066400000000000000000000154401314037446600300520ustar00rootroot00000000000000/****************************************************************************** * gnttab.h * * Two sets of functionality: * 1. 
Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include /* maddr_t */ #include #include #include #include #define GRANT_INVALID_REF 0 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; u8 queued; }; int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. 
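/*
 * [Editorial example -- not part of the original UVP-Tools sources.]
 * Sketch of the grant lifecycle described above: allocate a page, grant it
 * to a peer domain, then revoke it with gnttab_end_foreign_access(), which
 * frees the page once the grant is no longer in use. The peer domid and the
 * classic-Xen virt_to_mfn() helper are assumptions of this example.
 */
#if 0	/* illustration only, never built */
static int example_share_page_with(domid_t peer)
{
	unsigned long vaddr = __get_free_page(GFP_KERNEL);
	int ref;

	if (!vaddr)
		return -ENOMEM;

	ref = gnttab_grant_foreign_access(peer, virt_to_mfn(vaddr), 0);
	if (ref < 0) {
		free_page(vaddr);
		return ref;	/* no free grant references */
	}

	/* ... advertise 'ref' to the peer, e.g. through xenstore ... */

	/* Ends access now if unused, later otherwise; then frees the page. */
	gnttab_end_foreign_access(ref, vaddr);
	return 0;
}
#endif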
*/ void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); void gnttab_multi_end_foreign_access(unsigned int nr, grant_ref_t [], struct page *[]); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep); #if IS_ENABLED(CONFIG_XEN_BACKEND) void __gnttab_dma_map_page(struct page *page); #else #define __gnttab_dma_map_page __gnttab_dma_unmap_page #endif static inline void __gnttab_dma_unmap_page(struct page *page) { } void gnttab_reset_grant_page(struct page *page); #ifndef CONFIG_XEN int gnttab_resume(void); #endif void *arch_gnttab_alloc_shared(xen_pfn_t *frames); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } static inline void gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr, maddr_t new_addr, grant_handle_t handle) { if (xen_feature(XENFEAT_auto_translated_physmap)) { unmap->host_addr = __pa(addr); unmap->new_addr = __pa(new_addr); } else { unmap->host_addr = addr; unmap->new_addr = new_addr; } unmap->handle = handle; } #define gnttab_check_GNTST_eagain_while(__HCop, __HCarg_p) \ { \ u8 __hc_delay = 1; \ int __ret; \ while (unlikely((__HCarg_p)->status == GNTST_eagain && __hc_delay)) { \ msleep(__hc_delay++); \ __ret = HYPERVISOR_grant_table_op(__HCop, (__HCarg_p), 1); \ BUG_ON(__ret); \ } \ if (__hc_delay == 0) { \ pr_err("%s: %s gnt busy\n", __func__, current->comm); \ (__HCarg_p)->status = GNTST_bad_page; \ } \ if ((__HCarg_p)->status != GNTST_okay) \ pr_err("%s: %s gnt status %x\n", \ __func__, current->comm, (__HCarg_p)->status); \ } #define gnttab_check_GNTST_eagain_do_while(__HCop, __HCarg_p) \ { \ u8 __hc_delay = 1; \ int __ret; \ do { \ __ret = HYPERVISOR_grant_table_op(__HCop, (__HCarg_p), 1); \ BUG_ON(__ret); \ if ((__HCarg_p)->status == GNTST_eagain) \ msleep(__hc_delay++); \ } while ((__HCarg_p)->status == GNTST_eagain 
&& __hc_delay); \ if (__hc_delay == 0) { \ pr_err("%s: %s gnt busy\n", __func__, current->comm); \ (__HCarg_p)->status = GNTST_bad_page; \ } \ if ((__HCarg_p)->status != GNTST_okay) \ pr_err("%s: %s gnt status %x\n", \ __func__, current->comm, (__HCarg_p)->status); \ } #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/grant_table.h000066400000000000000000000157761314037446600310710ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include #include #include #include #define GNTTAB_RESERVED_XENSTORE 1 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_init(void); int gnttab_suspend(void); int gnttab_resume(void); int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, int flags, unsigned page_off, unsigned length); int gnttab_grant_foreign_access_trans(domid_t domid, int flags, domid_t trans_domid, grant_ref_t trans_gref); /* * Are sub-page grants available on this version of Xen? Returns true if they * are, and false if they're not. */ bool gnttab_subpage_grants_available(void); /* * Are transitive grants available on this version of Xen? Returns true if they * are, and false if they're not. */ bool gnttab_trans_grants_available(void); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. 
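/*
 * [Editorial example -- not part of the original UVP-Tools sources.]
 * Sketch of mapping a peer's grant with gnttab_set_map_op() and the
 * GNTST_eagain retry macro defined in gnttab.h above; the retry covers the
 * case where the target frame is temporarily paged out. The host address
 * and peer identifiers are assumptions of this example.
 */
#if 0	/* illustration only, never built */
static int example_map_grant(unsigned long host_addr, grant_ref_t ref,
			     domid_t peer, grant_handle_t *handle)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, host_addr, GNTMAP_host_map, ref, peer);

	/* Issues GNTTABOP_map_grant_ref, retrying while status is eagain. */
	gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op);

	if (op.status != GNTST_okay)
		return -EINVAL;

	*handle = op.handle;
	return 0;
}
#endif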
*/ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags, unsigned page_off, unsigned length); int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, int flags, domid_t trans_domid, grant_ref_t trans_gref); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); static inline void gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, uint32_t flags, grant_ref_t ref, domid_t domid) { if (flags & GNTMAP_contains_pte) map->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) map->host_addr = __pa(addr); else map->host_addr = addr; map->flags = flags; map->ref = ref; map->dom = domid; } static inline void gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr, uint32_t flags, grant_handle_t handle) { if (flags & GNTMAP_contains_pte) unmap->host_addr = addr; else if (xen_feature(XENFEAT_auto_translated_physmap)) unmap->host_addr = __pa(addr); else unmap->host_addr = addr; unmap->handle = handle; unmap->dev_bus_addr = 0; } int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, void **__shared); int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, grant_status_t **__shared); void arch_gnttab_unmap(void *shared, unsigned long nr_gframes); extern unsigned long xen_hvm_resume_frames; unsigned int gnttab_max_grant_frames(void); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_map_grant_ref *kunmap_ops, struct page **pages, unsigned int count); /* Perform a batch of grant map/copy operations. Retry every batch slot * for which the hypervisor returns GNTST_eagain. This is typically due * to paged out target frames. * * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds. 
* * Return value in each iand every status field of the batch guaranteed * to not be GNTST_eagain. */ void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count); void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count); #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/hvc-console.h000066400000000000000000000007371314037446600310160ustar00rootroot00000000000000#ifndef XEN_HVC_CONSOLE_H #define XEN_HVC_CONSOLE_H extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN void xen_console_resume(void); void xen_raw_console_write(const char *str); __printf(1, 2) void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } static inline __printf(1, 2) void xen_raw_printk(const char *fmt, ...) { } #endif #endif /* XEN_HVC_CONSOLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/hvm.h000066400000000000000000000024641314037446600273670ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include #ifndef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static inline const char *param_name(int op) { #define PARAM(x) [HVM_PARAM_##x] = #x static const char *const names[] = { PARAM(CALLBACK_IRQ), PARAM(STORE_PFN), PARAM(STORE_EVTCHN), PARAM(PAE_ENABLED), PARAM(IOREQ_PFN), PARAM(BUFIOREQ_PFN), PARAM(TIMER_MODE), PARAM(HPET_ENABLED), PARAM(IDENT_PT), PARAM(DM_DOMAIN), PARAM(ACPI_S_STATE), PARAM(VM86_TSS), PARAM(VPT_ALIGN), PARAM(CONSOLE_PFN), PARAM(CONSOLE_EVTCHN), }; #undef PARAM if (op >= ARRAY_SIZE(names)) return "unknown"; if (!names[op]) return "reserved"; return names[op]; } static inline int hvm_get_parameter(int idx, uint64_t *value) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { pr_err("Cannot get hvm parameter %s (%d): %d!\n", param_name(idx), idx, r); return r; } *value = xhv.value; return r; } #define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 #define HVM_CALLBACK_VIA_TYPE_SHIFT 56 #define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/hypercall.h000066400000000000000000000013751314037446600305600ustar00rootroot00000000000000#ifndef __XEN_HYPERCALL_H__ #define __XEN_HYPERCALL_H__ #include static inline int __must_check HYPERVISOR_multicall_check( multicall_entry_t *call_list, unsigned int nr_calls, const unsigned long *rc_list) { int rc = HYPERVISOR_multicall(call_list, nr_calls); if (unlikely(rc < 0)) return rc; BUG_ON(rc); BUG_ON((int)nr_calls < 0); for ( ; nr_calls > 0; --nr_calls, ++call_list) if (unlikely(call_list->result != (rc_list ? 
*rc_list++ : 0))) return nr_calls; return 0; } /* A construct to ignore the return value of hypercall wrappers in a few * exceptional cases (simply casting the function result to void doesn't * avoid the compiler warning): */ #define VOID(expr) ((void)((expr)?:0)) #endif /* __XEN_HYPERCALL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/000077500000000000000000000000001314037446600303565ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/COPYING000066400000000000000000000032641314037446600314160ustar00rootroot00000000000000XEN NOTICE ========== This copyright applies to all files within this subdirectory and its subdirectories: include/public/*.h include/public/hvm/*.h include/public/io/*.h The intention is that these files can be freely copied into the source tree of an operating system when porting that OS to run on Xen. Doing so does *not* cause the OS to become subject to the terms of the GPL. All other files in the Xen source distribution are covered by version 2 of the GNU General Public License except where explicitly stated otherwise within individual source files. -- Keir Fraser (on behalf of the Xen team) ===================================================================== Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-arm.h000066400000000000000000000213501314037446600322220ustar00rootroot00000000000000/****************************************************************************** * arch-arm.h * * Guest OS interface to ARM Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright 2011 (C) Citrix Systems */ #ifndef __XEN_PUBLIC_ARCH_ARM_H__ #define __XEN_PUBLIC_ARCH_ARM_H__ /* hypercall calling convention * ---------------------------- * * A hypercall is issued using the ARM HVC instruction. * * A hypercall can take up to 5 arguments. These are passed in * registers, the first argument in x0/r0 (for arm64/arm32 guests * respectively irrespective of whether the underlying hypervisor is * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2, * the forth in x3/r3 and the fifth in x4/r4. * * The hypercall number is passed in r12 (arm) or x16 (arm64). In both * cases the relevant ARM procedure calling convention specifies this * is an inter-procedure-call scratch register (e.g. for use in linker * stubs). This use does not conflict with use during a hypercall. * * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG. * * The return value is in x0/r0. * * The hypercall will clobber x16/r12 and the argument registers used * by that hypercall (except r0 which is the return value) i.e. in * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3. * * Parameter structs passed to hypercalls are laid out according to * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA * EABI) and Procedure Call Standard for the ARM 64-bit Architecture * (AAPCS64). Where there is a conflict the 64-bit standard should be * used regardless of guest type. Structures which are passed as * hypercall arguments are always little endian. */ #define XEN_HYPERCALL_TAG 0XEA1 #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #ifndef __ASSEMBLY__ #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef union { type *p; unsigned long q; } \ __guest_handle_ ## name; \ typedef union { type *p; uint64_aligned_t q; } \ __guest_handle_64_ ## name; /* * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field * in a struct in memory. On ARM is always 8 bytes sizes and 8 bytes * aligned. * XEN_GUEST_HANDLE_PARAM represent a guest pointer, when passed as an * hypercall argument. It is 4 bytes on aarch and 8 bytes on aarch64. */ #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) /* this is going to be changed on 64 bit */ #define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name #define set_xen_guest_handle_raw(hnd, val) \ do { \ typeof(&(hnd)) _sxghr_tmp = &(hnd); \ _sxghr_tmp->q = 0; \ _sxghr_tmp->p = val; \ } while ( 0 ) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */ # define __DECL_REG(n64, n32) union { \ uint64_t n64; \ uint32_t n32; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., x0). 
*/ #define __DECL_REG(n64, n32) uint64_t n64 #endif struct vcpu_guest_core_regs { /* Aarch64 Aarch32 */ __DECL_REG(x0, r0_usr); __DECL_REG(x1, r1_usr); __DECL_REG(x2, r2_usr); __DECL_REG(x3, r3_usr); __DECL_REG(x4, r4_usr); __DECL_REG(x5, r5_usr); __DECL_REG(x6, r6_usr); __DECL_REG(x7, r7_usr); __DECL_REG(x8, r8_usr); __DECL_REG(x9, r9_usr); __DECL_REG(x10, r10_usr); __DECL_REG(x11, r11_usr); __DECL_REG(x12, r12_usr); __DECL_REG(x13, sp_usr); __DECL_REG(x14, lr_usr); __DECL_REG(x15, __unused_sp_hyp); __DECL_REG(x16, lr_irq); __DECL_REG(x17, sp_irq); __DECL_REG(x18, lr_svc); __DECL_REG(x19, sp_svc); __DECL_REG(x20, lr_abt); __DECL_REG(x21, sp_abt); __DECL_REG(x22, lr_und); __DECL_REG(x23, sp_und); __DECL_REG(x24, r8_fiq); __DECL_REG(x25, r9_fiq); __DECL_REG(x26, r10_fiq); __DECL_REG(x27, r11_fiq); __DECL_REG(x28, r12_fiq); __DECL_REG(x29, sp_fiq); __DECL_REG(x30, lr_fiq); /* Return address and mode */ __DECL_REG(pc64, pc32); /* ELR_EL2 */ uint32_t cpsr; /* SPSR_EL2 */ union { uint32_t spsr_el1; /* AArch64 */ uint32_t spsr_svc; /* AArch32 */ }; /* AArch32 guests only */ uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt; /* AArch64 guests only */ uint64_t sp_el0; uint64_t sp_el1, elr_el1; }; typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t); #undef __DECL_REG typedef uint64_t xen_pfn_t; #define PRI_xen_pfn PRIx64 /* Maximum number of virtual CPUs in legacy multi-processor guests. */ /* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */ #define XEN_LEGACY_MAX_VCPUS 1 typedef uint64_t xen_ulong_t; #define PRI_xen_ulong PRIx64 struct vcpu_guest_context { #define _VGCF_online 0 #define VGCF_online (1<<_VGCF_online) uint32_t flags; /* VGCF_* */ struct vcpu_guest_core_regs user_regs; /* Core CPU registers */ uint32_t sctlr, ttbcr; uint64_t ttbr0, ttbr1; }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_vcpu_info { }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct arch_shared_info { }; typedef struct arch_shared_info arch_shared_info_t; typedef uint64_t xen_callback_t; #endif /* ifndef __ASSEMBLY __ */ /* PSR bits (CPSR, SPSR)*/ /* 32 bit modes */ #define PSR_MODE_USR 0x10 #define PSR_MODE_FIQ 0x11 #define PSR_MODE_IRQ 0x12 #define PSR_MODE_SVC 0x13 #define PSR_MODE_MON 0x16 #define PSR_MODE_ABT 0x17 #define PSR_MODE_HYP 0x1a #define PSR_MODE_UND 0x1b #define PSR_MODE_SYS 0x1f /* 64 bit modes */ #ifdef __aarch64__ #define PSR_MODE_BIT 0x10 /* Set iff AArch32 */ #define PSR_MODE_EL3h 0x0d #define PSR_MODE_EL3t 0x0c #define PSR_MODE_EL2h 0x09 #define PSR_MODE_EL2t 0x08 #define PSR_MODE_EL1h 0x05 #define PSR_MODE_EL1t 0x04 #define PSR_MODE_EL0t 0x00 #endif #define PSR_THUMB (1<<5) /* Thumb Mode enable */ #define PSR_FIQ_MASK (1<<6) /* Fast Interrupt mask */ #define PSR_IRQ_MASK (1<<7) /* Interrupt mask */ #define PSR_ABT_MASK (1<<8) /* Asynchronous Abort mask */ #define PSR_BIG_ENDIAN (1<<9) /* Big Endian Mode */ #define PSR_IT_MASK (0x0600fc00) /* Thumb If-Then Mask */ #define PSR_JAZELLE (1<<24) /* Jazelle Mode */ #define PSR_GUEST_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC) #endif /* __XEN_PUBLIC_ARCH_ARM_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ 
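/*
 * [Editorial example -- not part of the original UVP-Tools sources.]
 * Sketch of how the guest-handle macros defined in arch-arm.h above are
 * meant to be used when populating a hypercall argument structure. The
 * typedef, structure and field names are invented for illustration.
 */
#if 0	/* illustration only, never built */
typedef uint8_t example_byte_t;
DEFINE_XEN_GUEST_HANDLE(example_byte_t);

struct example_hypercall_args {
	/* A handle embedded in a struct is 8 bytes, 8-byte aligned on ARM. */
	XEN_GUEST_HANDLE(example_byte_t) buffer;
	uint64_t size;
};

static void example_fill_args(struct example_hypercall_args *args,
			      example_byte_t *buf, uint64_t size)
{
	/* Zeroes the 64-bit union before storing the pointer. */
	set_xen_guest_handle(args->buffer, buf);
	args->size = size;
}
#endif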
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-arm/000077500000000000000000000000001314037446600320505ustar00rootroot00000000000000hvm/000077500000000000000000000000001314037446600325635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-armsave.h000066400000000000000000000027071314037446600337000ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-arm/hvm/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2012 Citrix Systems Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_ARM_H__ #define __XEN_PUBLIC_HVM_SAVE_ARM_H__ #endif /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/000077500000000000000000000000001314037446600317165ustar00rootroot00000000000000cpuid.h000066400000000000000000000050601314037446600331150ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/****************************************************************************** * arch-x86/cpuid.h * * CPUID interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2007 Citrix Systems, Inc. * * Authors: * Keir Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__ #define __XEN_PUBLIC_ARCH_X86_CPUID_H__ /* Xen identification leaves start at 0x40000000. */ #define XEN_CPUID_FIRST_LEAF 0x40000000 #define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i)) /* * Leaf 1 (0x40000000) * EAX: Largest Xen-information leaf. All leaves up to an including @EAX * are supported by the Xen host. * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification * of a Xen host. */ #define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */ #define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */ #define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */ /* * Leaf 2 (0x40000001) * EAX[31:16]: Xen major version. * EAX[15: 0]: Xen minor version. * EBX-EDX: Reserved (currently all zeroes). */ /* * Leaf 3 (0x40000002) * EAX: Number of hypercall transfer pages. This register is always guaranteed * to specify one hypercall page. * EBX: Base address of Xen-specific MSRs. * ECX: Features 1. Unused bits are set to zero. * EDX: Features 2. Unused bits are set to zero. */ /* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */ #define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0 #define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ hvm/000077500000000000000000000000001314037446600324315ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86save.h000066400000000000000000000341371314037446600335500ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/hvm/* * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__ #define __XEN_PUBLIC_HVM_SAVE_X86_H__ /* * Save/restore header: general info about the save file. 
*/ #define HVM_FILE_MAGIC 0x54381286 #define HVM_FILE_VERSION 0x00000001 struct hvm_save_header { uint32_t magic; /* Must be HVM_FILE_MAGIC */ uint32_t version; /* File format version */ uint64_t changeset; /* Version of Xen that saved this file */ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */ uint32_t gtsc_khz; /* Guest's TSC frequency in kHz */ }; DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header); /* * Processor * * Compat: Pre-3.4 didn't have msr_tsc_aux */ struct hvm_hw_cpu { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint64_t sysenter_cs; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. */ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; uint64_t msr_tsc_aux; /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; struct hvm_hw_cpu_compat { uint8_t fpu_regs[512]; uint64_t rax; uint64_t rbx; uint64_t rcx; uint64_t rdx; uint64_t rbp; uint64_t rsi; uint64_t rdi; uint64_t rsp; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t rip; uint64_t rflags; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr0; uint64_t dr1; uint64_t dr2; uint64_t dr3; uint64_t dr6; uint64_t dr7; uint32_t cs_sel; uint32_t ds_sel; uint32_t es_sel; uint32_t fs_sel; uint32_t gs_sel; uint32_t ss_sel; uint32_t tr_sel; uint32_t ldtr_sel; uint32_t cs_limit; uint32_t ds_limit; uint32_t es_limit; uint32_t fs_limit; uint32_t gs_limit; uint32_t ss_limit; uint32_t tr_limit; uint32_t ldtr_limit; uint32_t idtr_limit; uint32_t gdtr_limit; uint64_t cs_base; uint64_t ds_base; uint64_t es_base; uint64_t fs_base; uint64_t gs_base; uint64_t ss_base; uint64_t tr_base; uint64_t ldtr_base; uint64_t idtr_base; uint64_t gdtr_base; uint32_t cs_arbytes; uint32_t ds_arbytes; uint32_t es_arbytes; uint32_t fs_arbytes; uint32_t gs_arbytes; uint32_t ss_arbytes; uint32_t tr_arbytes; uint32_t ldtr_arbytes; uint64_t sysenter_cs; uint64_t sysenter_esp; uint64_t sysenter_eip; /* msr for em64t */ uint64_t shadow_gs; /* msr content saved/restored. 
*/ uint64_t msr_flags; uint64_t msr_lstar; uint64_t msr_star; uint64_t msr_cstar; uint64_t msr_syscall_mask; uint64_t msr_efer; /*uint64_t msr_tsc_aux; COMPAT */ /* guest's idea of what rdtsc() would return */ uint64_t tsc; /* pending event, if any */ union { uint32_t pending_event; struct { uint8_t pending_vector:8; uint8_t pending_type:3; uint8_t pending_error_valid:1; uint32_t pending_reserved:19; uint8_t pending_valid:1; }; }; /* error code for pending event */ uint32_t error_code; }; static inline int _hvm_hw_fix_cpu(void *h) { union hvm_hw_cpu_union { struct hvm_hw_cpu nat; struct hvm_hw_cpu_compat cmp; } *ucpu = (union hvm_hw_cpu_union *)h; /* If we copy from the end backwards, we should * be able to do the modification in-place */ ucpu->nat.error_code = ucpu->cmp.error_code; ucpu->nat.pending_event = ucpu->cmp.pending_event; ucpu->nat.tsc = ucpu->cmp.tsc; ucpu->nat.msr_tsc_aux = 0; return 0; } DECLARE_HVM_SAVE_TYPE_COMPAT(CPU, 2, struct hvm_hw_cpu, \ struct hvm_hw_cpu_compat, _hvm_hw_fix_cpu); /* * PIC */ struct hvm_hw_vpic { /* IR line bitmasks. */ uint8_t irr; uint8_t imr; uint8_t isr; /* Line IRx maps to IRQ irq_base+x */ uint8_t irq_base; /* * Where are we in ICW2-4 initialisation (0 means no init in progress)? * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1). * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence) * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence) */ uint8_t init_state:4; /* IR line with highest priority. */ uint8_t priority_add:4; /* Reads from A=0 obtain ISR or IRR? */ uint8_t readsel_isr:1; /* Reads perform a polling read? */ uint8_t poll:1; /* Automatically clear IRQs from the ISR during INTA? */ uint8_t auto_eoi:1; /* Automatically rotate IRQ priorities during AEOI? */ uint8_t rotate_on_auto_eoi:1; /* Exclude slave inputs when considering in-service IRQs? */ uint8_t special_fully_nested_mode:1; /* Special mask mode excludes masked IRs from AEOI and priority checks. */ uint8_t special_mask_mode:1; /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */ uint8_t is_master:1; /* Edge/trigger selection. */ uint8_t elcr; /* Virtual INT output. */ uint8_t int_output; }; DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic); /* * IO-APIC */ #define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */ struct hvm_hw_vioapic { uint64_t base_address; uint32_t ioregsel; uint32_t id; union vioapic_redir_entry { uint64_t bits; struct { uint8_t vector; uint8_t delivery_mode:3; uint8_t dest_mode:1; uint8_t delivery_status:1; uint8_t polarity:1; uint8_t remote_irr:1; uint8_t trig_mode:1; uint8_t mask:1; uint8_t reserve:7; uint8_t reserved[4]; uint8_t dest_id; } fields; } redirtbl[VIOAPIC_NUM_PINS]; }; DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic); /* * LAPIC */ struct hvm_hw_lapic { uint64_t apic_base_msr; uint32_t disabled; /* VLAPIC_xx_DISABLED */ uint32_t timer_divisor; uint64_t tdt_msr; }; DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic); struct hvm_hw_lapic_regs { uint8_t data[1024]; }; DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs); /* * IRQs */ struct hvm_hw_pci_irqs { /* * Virtual interrupt wires for a single PCI bus. * Indexed by: device*4 + INTx#. */ union { unsigned long i[16 / sizeof (unsigned long)]; /* DECLARE_BITMAP(i, 32*4); */ uint64_t pad[2]; }; }; DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs); struct hvm_hw_isa_irqs { /* * Virtual interrupt wires for ISA devices. * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing). 
*/ union { unsigned long i[1]; /* DECLARE_BITMAP(i, 16); */ uint64_t pad[1]; }; }; DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs); struct hvm_hw_pci_link { /* * PCI-ISA interrupt router. * Each PCI is 'wire-ORed' into one of four links using * the traditional 'barber's pole' mapping ((device + INTx#) & 3). * The router provides a programmable mapping from each link to a GSI. */ uint8_t route[4]; uint8_t pad0[4]; }; DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link); /* * PIT */ struct hvm_hw_pit { struct hvm_hw_pit_channel { uint32_t count; /* can be 65536 */ uint16_t latched_count; uint8_t count_latched; uint8_t status_latched; uint8_t status; uint8_t read_state; uint8_t write_state; uint8_t write_latch; uint8_t rw_mode; uint8_t mode; uint8_t bcd; /* not supported */ uint8_t gate; /* timer start */ } channels[3]; /* 3 x 16 bytes */ uint32_t speaker_data_on; uint32_t pad0; }; DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit); /* * RTC */ #define RTC_CMOS_SIZE 14 struct hvm_hw_rtc { /* CMOS bytes */ uint8_t cmos_data[RTC_CMOS_SIZE]; /* Index register for 2-part operations */ uint8_t cmos_index; uint8_t pad0; }; DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc); /* * HPET */ #define HPET_TIMER_NUM 3 /* 3 timers supported now */ struct hvm_hw_hpet { /* Memory-mapped, software visible registers */ uint64_t capability; /* capabilities */ uint64_t res0; /* reserved */ uint64_t config; /* configuration */ uint64_t res1; /* reserved */ uint64_t isr; /* interrupt status reg */ uint64_t res2[25]; /* reserved */ uint64_t mc64; /* main counter */ uint64_t res3; /* reserved */ struct { /* timers */ uint64_t config; /* configuration/cap */ uint64_t cmp; /* comparator */ uint64_t fsb; /* FSB route, not supported now */ uint64_t res4; /* reserved */ } timers[HPET_TIMER_NUM]; uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */ /* Hidden register state */ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */ }; DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet); /* * PM timer */ struct hvm_hw_pmtimer { uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */ }; DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer); /* * MTRR MSRs */ struct hvm_hw_mtrr { #define MTRR_VCNT 8 #define NUM_FIXED_MSR 11 uint64_t msr_pat_cr; /* mtrr physbase & physmask msr pair*/ uint64_t msr_mtrr_var[MTRR_VCNT*2]; uint64_t msr_mtrr_fixed[NUM_FIXED_MSR]; uint64_t msr_mtrr_cap; uint64_t msr_mtrr_def_type; }; DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr); /* * The save area of XSAVE/XRSTOR. */ struct hvm_hw_cpu_xsave { uint64_t xfeature_mask; uint64_t xcr0; /* Updated by XSETBV */ uint64_t xcr0_accum; /* Updated by XSETBV */ struct { struct { char x[512]; } fpu_sse; struct { uint64_t xstate_bv; /* Updated by XRSTOR */ uint64_t reserved[7]; } xsave_hdr; /* The 64-byte header */ struct { char x[0]; } ymm; /* YMM */ } save_area; }; #define CPU_XSAVE_CODE 16 /* * Viridian hypervisor context. 
*/ struct hvm_viridian_domain_context { uint64_t hypercall_gpa; uint64_t guest_os_id; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context); struct hvm_viridian_vcpu_context { uint64_t apic_assist; }; DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context); struct hvm_vmce_vcpu { uint64_t caps; uint64_t mci_ctl2_bank0; uint64_t mci_ctl2_bank1; }; DECLARE_HVM_SAVE_TYPE(VMCE_VCPU, 18, struct hvm_vmce_vcpu); struct hvm_tsc_adjust { uint64_t tsc_adjust; }; DECLARE_HVM_SAVE_TYPE(TSC_ADJUST, 19, struct hvm_tsc_adjust); /* * Largest type-code in use */ #define HVM_SAVE_CODE_MAX 19 #endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */ xen-mca.h000066400000000000000000000351331314037446600333450ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/****************************************************************************** * arch-x86/mca.h * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Guest OS machine check interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ /* Full MCA functionality has the following Usecases from the guest side: * * Must have's: * 1. Dom0 and DomU register machine check trap callback handlers * (already done via "set_trap_table" hypercall) * 2. Dom0 registers machine check event callback handler * (doable via EVTCHNOP_bind_virq) * 3. Dom0 and DomU fetches machine check data * 4. Dom0 wants Xen to notify a DomU * 5. Dom0 gets DomU ID from physical address * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy") * * Nice to have's: * 7. Dom0 wants Xen to deactivate a physical CPU * This is better done as separate task, physical CPU hotplugging, * and hypercall(s) should be sysctl's * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to * move a DomU (or Dom0 itself) away from a malicious page * producing correctable errors. * 9. offlining physical page: * Xen free's and never re-uses a certain physical page. * 10. Testfacility: Allow Dom0 to write values into machine check MSR's * and tell Xen to trigger a machine check */ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 /* * The xen-unstable repo has interface version 0x03000001; out interface * is incompatible with that and any future minor revisions, so we * choose a different version number range that is numerically less * than that used in xen-unstable. 
*/ #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent telemetry */ #define XEN_MC_NONURGENT 0x0001 /* IN: Dom0/DomU calls hypercall to retrieve urgent telemetry */ #define XEN_MC_URGENT 0x0002 /* IN: Dom0 acknowledges previosly-fetched telemetry */ #define XEN_MC_ACK 0x0004 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 /* OUT: Between notification time and this hypercall an other * (most likely) correctable error happened. The fetched data, * does not match the original machine check data. */ #define XEN_MC_NOMATCH 0x4 /* OUT: DomU did not register MC NMI handler. Try something else. */ #define XEN_MC_CANNOTHANDLE 0x8 /* OUT: Notifying DomU failed. Retry later or try something else. */ #define XEN_MC_NOTDELIVERED 0x10 /* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */ #ifndef __ASSEMBLY__ #define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */ /* * Machine Check Architecure: * structs are read-only and used to report all kinds of * correctable and uncorrectable errors detected by the HW. * Dom0 and DomU: register a handler to get notified. * Dom0 only: Correctable errors are reported via VIRQ_MCA * Dom0 and DomU: Uncorrectable errors are reported via nmi handlers */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { struct mcinfo_common common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains bank local x86 mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 * and if mc_addr is valid. Never valid on DomU. */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address, only valid * if addr bit is set in mc_status */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; /* You can fill up to five registers. * If you need more, then use this structure * multiple times. */ uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. 
Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. * usage Senario: After offlining broken page, XEN might pass its page offline * recovery action result to DOM0. DOM0 will save the information in * non-volatile memory for further proactive actions, such as offlining the * easy broken page earlier when doing next reboot. */ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_HYPERCALLSIZE 1024 #define MCINFO_MAXSIZE 768 #define MCINFO_FLAGS_UNCOMPLETE 0x1 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t flags; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; typedef struct mc_info mc_info_t; DEFINE_XEN_GUEST_HANDLE(mc_info_t); #define __MC_MSR_ARRAYSIZE 8 #define __MC_NMSRS 1 #define MC_NCAPS 7 /* 7 CPU feature flag words */ #define MC_CAPS_STD_EDX 0 /* cpuid level 0x00000001 (%edx) */ #define MC_CAPS_AMD_EDX 1 /* cpuid level 0x80000001 (%edx) */ #define MC_CAPS_TM 2 /* cpuid level 0x80860001 (TransMeta) */ #define MC_CAPS_LINUX 3 /* Linux-defined */ #define MC_CAPS_STD_ECX 4 /* cpuid level 0x00000001 (%ecx) */ #define MC_CAPS_VIA 5 /* cpuid level 0xc0000001 */ #define MC_CAPS_AMD_ECX 6 /* cpuid level 0x80000001 (%ecx) */ struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; int32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); /* * OS's should use these instead of writing their own lookup function * each with its own bugs and drawbacks. * We use macros instead of static inline functions to allow guests * to include this header in assembly files (*.S). 
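 *
 * For example (illustrative only, assuming "mi" is a struct mc_info * obtained
 * from a fetch), a handler might locate the global record and then walk the
 * per-bank records as:
 *
 *     struct mcinfo_common *cmn;
 *     uint32_t i;
 *
 *     x86_mcinfo_lookup(cmn, mi, MC_TYPE_GLOBAL);
 *     if (cmn != NULL) {
 *         struct mcinfo_global *mig = (struct mcinfo_global *)cmn;
 *         ... mig->mc_domid, mig->mc_vcpuid, mig->mc_gstatus are valid here ...
 *     }
 *
 *     cmn = x86_mcinfo_first(mi);
 *     for (i = 0; i < x86_mcinfo_nentries(mi); i++) {
 *         if (cmn->type == MC_TYPE_BANK)
 *             ... handle the struct mcinfo_bank at cmn ...
 *         cmn = x86_mcinfo_next(cmn);
 *     }
 *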
*/ /* Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ (_mi)->mi_nentries /* Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ #define x86_mcinfo_lookup(_ret, _mi, _type) \ do { \ uint32_t found, i; \ struct mcinfo_common *_mic; \ \ found = 0; \ (_ret) = NULL; \ if (_mi == NULL) break; \ _mic = x86_mcinfo_first(_mi); \ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \ if (_mic->type == (_type)) { \ found = 1; \ break; \ } \ _mic = x86_mcinfo_next(_mic); \ } \ (_ret) = found ? _mic : NULL; \ } while (0) /* Usecase 1 * Register machine check trap callback handler * (already done via "set_trap_table" hypercall) */ /* Usecase 2 * Dom0 registers machine check event callback handler * done by EVTCHNOP_bind_virq */ /* Usecase 3 * Fetch machine check data from hypervisor. * Note, this hypercall is special, because both Dom0 and DomU must use this. */ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_NONURGENT, XEN_MC_URGENT, XEN_MC_ACK if ack'ing an earlier fetch */ /* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */ uint32_t _pad0; uint64_t fetch_id; /* OUT: id for ack, IN: id we are ack'ing */ /* OUT variables. */ XEN_GUEST_HANDLE(mc_info_t) data; }; typedef struct xen_mc_fetch xen_mc_fetch_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t); /* Usecase 4 * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables. */ uint16_t mc_domid; /* The unprivileged domain to notify. */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify. * Usually echo'd value from the fetch hypercall. */ /* IN/OUT variables. */ uint32_t flags; /* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */ /* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */ }; typedef struct xen_mc_notifydomain xen_mc_notifydomain_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. 
count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; #if defined(__XEN__) || defined(__XEN_TOOLS__) #define XEN_MC_inject_v2 6 #define XEN_MC_INJECT_TYPE_MASK 0x7 #define XEN_MC_INJECT_TYPE_MCE 0x0 #define XEN_MC_INJECT_TYPE_CMCI 0x1 #define XEN_MC_INJECT_CPU_BROADCAST 0x8 struct xen_mc_inject_v2 { uint32_t flags; struct xenctl_bitmap cpumap; }; #endif struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; #if defined(__XEN__) || defined(__XEN_TOOLS__) struct xen_mc_inject_v2 mc_inject_v2; #endif } u; }; typedef struct xen_mc xen_mc_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_t); #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ xen-x86_32.h000066400000000000000000000142101314037446600335270ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/****************************************************************************** * xen-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2007, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ /* * Hypercall interface: * Input: %ebx, %ecx, %edx, %esi, %edi, %ebp (arguments 1-6) * Output: %eax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx) */ /* * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
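 *
 * (Illustrative sketch, not part of the original header, of the 32-bit
 *  hypercall convention described under "Hypercall interface" above. The
 *  hypercall_page symbol is assumed to have been set up by the guest loader,
 *  and __HYPERVISOR_xen_version / XENVER_version are the usual constants from
 *  xen.h and version.h, used here purely as an example:
 *
 *      long ret, cmd = XENVER_version, arg = 0;
 *      asm volatile ("call hypercall_page + (__HYPERVISOR_xen_version * 32)"
 *                    : "=a" (ret), "+b" (cmd), "+c" (arg)
 *                    : : "memory");
 *
 *  Arguments travel in %ebx, %ecx, ... and the result comes back in %eax,
 *  with the argument registers clobbered as noted above.)
 *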
*/ #define FLAT_RING1_CS 0xe019 /* GDT index 259 */ #define FLAT_RING1_DS 0xe021 /* GDT index 260 */ #define FLAT_RING1_SS 0xe021 /* GDT index 260 */ #define FLAT_RING3_CS 0xe02b /* GDT index 261 */ #define FLAT_RING3_DS 0xe033 /* GDT index 262 */ #define FLAT_RING3_SS 0xe033 /* GDT index 262 */ #define FLAT_KERNEL_CS FLAT_RING1_CS #define FLAT_KERNEL_DS FLAT_RING1_DS #define FLAT_KERNEL_SS FLAT_RING1_SS #define FLAT_USER_CS FLAT_RING3_CS #define FLAT_USER_DS FLAT_RING3_DS #define FLAT_USER_SS FLAT_RING3_SS #define __HYPERVISOR_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_START_PAE 0xF5800000 #define __MACH2PHYS_VIRT_END_PAE 0xF6800000 #define HYPERVISOR_VIRT_START_PAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE) #define MACH2PHYS_VIRT_START_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE) #define MACH2PHYS_VIRT_END_PAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE) /* Non-PAE bounds are obsolete. */ #define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000 #define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000 #define HYPERVISOR_VIRT_START_NONPAE \ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_START_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE) #define MACH2PHYS_VIRT_END_NONPAE \ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE) #define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE #define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE #define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START) #endif /* 32-/64-bit invariability for control interfaces (domctl/sysctl). */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #undef ___DEFINE_XEN_GUEST_HANDLE #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } \ __guest_handle_ ## name; \ typedef struct { union { type *p; uint64_aligned_t q; }; } \ __guest_handle_64_ ## name #undef set_xen_guest_handle_raw #define set_xen_guest_handle_raw(hnd, val) \ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \ (hnd).p = val; \ } while ( 0 ) #define uint64_aligned_t uint64_t __attribute__((aligned(8))) #define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name #define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name) #endif #ifndef __ASSEMBLY__ struct cpu_user_regs { uint32_t ebx; uint32_t ecx; uint32_t edx; uint32_t esi; uint32_t edi; uint32_t ebp; uint32_t eax; uint16_t error_code; /* private */ uint16_t entry_vector; /* private */ uint32_t eip; uint16_t cs; uint8_t saved_upcall_mask; uint8_t _pad0; uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ uint32_t esp; uint16_t ss, _pad1; uint16_t es, _pad2; uint16_t ds, _pad3; uint16_t fs, _pad4; uint16_t gs, _pad5; }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); /* * Page-directory addresses above 4GB do not fit into architectural %cr3. * When accessing %cr3, or equivalent field in vcpu_guest_context, guests * must use the following accessor macros to pack/unpack valid MFNs. 
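 *
 * For example (illustrative only), a guest holding the machine frame number of
 * a PAE page directory in pd_mfn would load and recover it as:
 *
 *     unsigned long packed = xen_pfn_to_cr3(pd_mfn);   -- value for ctrlreg[3]
 *     unsigned long mfn    = xen_cr3_to_pfn(packed);   -- mfn == pd_mfn again
 *
 * The macros simply rotate the 32-bit frame number by 12 bits, so a directory
 * that lives above 4GB still fits in the 32-bit register image.
 *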
*/ #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) #define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) struct arch_vcpu_info { unsigned long cr2; unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; struct xen_callback { unsigned long cs; unsigned long eip; }; typedef struct xen_callback xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ xen-x86_64.h000066400000000000000000000153141314037446600335420ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/****************************************************************************** * xen-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ /* * Hypercall interface: * Input: %rdi, %rsi, %rdx, %r10, %r8, %r9 (arguments 1-6) * Output: %rax * Access is via hypercall page (set up by guest loader or via a Xen MSR): * call hypercall_page + hypercall-number * 32 * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi) */ /* * 64-bit segment selectors * These flat segments are in the Xen-private section of every GDT. Since these * are also present in the initial GDT, many OSes will be able to avoid * installing their own GDT. 
*/ #define FLAT_RING3_CS32 0xe023 /* GDT index 260 */ #define FLAT_RING3_CS64 0xe033 /* GDT index 261 */ #define FLAT_RING3_DS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_DS64 0x0000 /* NULL selector */ #define FLAT_RING3_SS32 0xe02b /* GDT index 262 */ #define FLAT_RING3_SS64 0xe02b /* GDT index 262 */ #define FLAT_KERNEL_DS64 FLAT_RING3_DS64 #define FLAT_KERNEL_DS32 FLAT_RING3_DS32 #define FLAT_KERNEL_DS FLAT_KERNEL_DS64 #define FLAT_KERNEL_CS64 FLAT_RING3_CS64 #define FLAT_KERNEL_CS32 FLAT_RING3_CS32 #define FLAT_KERNEL_CS FLAT_KERNEL_CS64 #define FLAT_KERNEL_SS64 FLAT_RING3_SS64 #define FLAT_KERNEL_SS32 FLAT_RING3_SS32 #define FLAT_KERNEL_SS FLAT_KERNEL_SS64 #define FLAT_USER_DS64 FLAT_RING3_DS64 #define FLAT_USER_DS32 FLAT_RING3_DS32 #define FLAT_USER_DS FLAT_USER_DS64 #define FLAT_USER_CS64 FLAT_RING3_CS64 #define FLAT_USER_CS32 FLAT_RING3_CS32 #define FLAT_USER_CS FLAT_USER_CS64 #define FLAT_USER_SS64 FLAT_RING3_SS64 #define FLAT_USER_SS32 FLAT_RING3_SS32 #define FLAT_USER_SS FLAT_USER_SS64 #define __HYPERVISOR_VIRT_START 0xFFFF800000000000 #define __HYPERVISOR_VIRT_END 0xFFFF880000000000 #define __MACH2PHYS_VIRT_START 0xFFFF800000000000 #define __MACH2PHYS_VIRT_END 0xFFFF804000000000 #ifndef HYPERVISOR_VIRT_START #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) #define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END) #endif #define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START) #define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END) #define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3) #ifndef machine_to_phys_mapping #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) #endif /* * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) * @which == SEGBASE_* ; @base == 64-bit base address * Returns 0 on success. */ #define SEGBASE_FS 0 #define SEGBASE_GS_USER 1 #define SEGBASE_GS_KERNEL 2 #define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */ /* * int HYPERVISOR_iret(void) * All arguments are on the kernel stack, in the following format. * Never returns if successful. Current kernel context is lost. * The saved CS is mapped as follows: * RING0 -> RING3 kernel mode. * RING1 -> RING3 kernel mode. * RING2 -> RING3 kernel mode. * RING3 -> RING3 user mode. * However RING0 indicates that the guest kernel should return to iteself * directly with * orb $3,1*8(%rsp) * iretq * If flags contains VGCF_in_syscall: * Restore RAX, RIP, RFLAGS, RSP. * Discard R11, RCX, CS, SS. * Otherwise: * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP. * All other registers are saved on hypercall entry and restored to user. */ /* Guest exited in SYSCALL context? Return to guest with SYSRET? */ #define _VGCF_in_syscall 8 #define VGCF_in_syscall (1<<_VGCF_in_syscall) #define VGCF_IN_SYSCALL VGCF_in_syscall #ifndef __ASSEMBLY__ struct iret_context { /* Top of stack (%rsp at point of hypercall). */ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; /* Bottom of iret stack frame. */ }; #if defined(__GNUC__) && !defined(__STRICT_ANSI__) /* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */ #define __DECL_REG(name) union { \ uint64_t r ## name, e ## name; \ uint32_t _e ## name; \ } #else /* Non-gcc sources must always use the proper 64-bit name (e.g., rax). 
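 *
 * (Illustrative note, not part of the original header: with a GNU toolchain
 *  the anonymous-union form above means that, for the struct cpu_user_regs
 *  defined below,
 *
 *      regs->rax   and   regs->eax   name the same 64-bit slot, while
 *      regs->_eax  is a 32-bit view of its low half;
 *
 *  with other toolchains only the 64-bit r-names such as regs->rax exist.)
 *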
*/ #define __DECL_REG(name) uint64_t r ## name #endif struct cpu_user_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; __DECL_REG(bp); __DECL_REG(bx); uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; __DECL_REG(ax); __DECL_REG(cx); __DECL_REG(dx); __DECL_REG(si); __DECL_REG(di); uint32_t error_code; /* private */ uint32_t entry_vector; /* private */ __DECL_REG(ip); uint16_t cs, _pad0[1]; uint8_t saved_upcall_mask; uint8_t _pad1[3]; __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */ __DECL_REG(sp); uint16_t ss, _pad2[3]; uint16_t es, _pad3[3]; uint16_t ds, _pad4[3]; uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */ }; typedef struct cpu_user_regs cpu_user_regs_t; DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t); #undef __DECL_REG #define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12) #define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12) struct arch_vcpu_info { unsigned long cr2; unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ }; typedef struct arch_vcpu_info arch_vcpu_info_t; typedef unsigned long xen_callback_t; #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ xen.h000066400000000000000000000235431314037446600326110ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/arch-x86/****************************************************************************** * arch-x86/xen.h * * Guest OS interface to x86 Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "../xen.h" #ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__ #define __XEN_PUBLIC_ARCH_X86_XEN_H__ /* Structural guest handles introduced in 0x00030201. */ #if __XEN_INTERFACE_VERSION__ >= 0x00030201 #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef struct { type *p; } __guest_handle_ ## name #else #define ___DEFINE_XEN_GUEST_HANDLE(name, type) \ typedef type * __guest_handle_ ## name #endif /* * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field * in a struct in memory. * XEN_GUEST_HANDLE_PARAM represent a guest pointer, when passed as an * hypercall argument. * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but * they might not be on other architectures. 
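 *
 * As an illustrative sketch (not part of the original header), guest code
 * normally fills such a handle field through the accessor defined below rather
 * than touching the raw pointer member directly:
 *
 *     XEN_GUEST_HANDLE(mc_info_t) data;    -- e.g. the field in xen_mc_fetch
 *     struct mc_info *buf = ...;
 *     set_xen_guest_handle(data, buf);     -- on x86 this is just data.p = buf
 *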
*/ #define __DEFINE_XEN_GUEST_HANDLE(name, type) \ ___DEFINE_XEN_GUEST_HANDLE(name, type); \ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type) #define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name) #define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name #define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name) #define XEN_GUEST_HANDLE_PARAM(name) XEN_GUEST_HANDLE(name) #define set_xen_guest_handle_raw(hnd, val) do { (hnd).p = val; } while (0) #ifdef __XEN_TOOLS__ #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) #endif #define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val) /* Allow co-existing Linux 2.6.23+ Xen interface definitions. */ #define DEFINE_GUEST_HANDLE_STRUCT(name) struct name #if defined(__i386__) #include "xen-x86_32.h" #elif defined(__x86_64__) #include "xen-x86_64.h" #endif #ifndef __ASSEMBLY__ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" #endif #define XEN_HAVE_PV_GUEST_ENTRY 1 #define XEN_HAVE_PV_UPCALL_MASK 1 /* * `incontents 200 segdesc Segment Descriptor Tables */ /* * ` enum neg_errnoval * ` HYPERVISOR_set_gdt(const xen_pfn_t frames[], unsigned int entries); * ` */ /* * A number of GDT entries are reserved by Xen. These are not situated at the * start of the GDT because some stupid OSes export hard-coded selector values * in their ABI. These hard-coded values are always near the start of the GDT, * so Xen places itself out of the way, at the far end of the GDT. * * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op */ #define FIRST_RESERVED_GDT_PAGE 14 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) /* * ` enum neg_errnoval * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc); * ` * ` @pa The machine physical address of the descriptor to * ` update. Must be either a descriptor page or writable. * ` @desc The descriptor value to update, in the same format as a * ` native descriptor table entry. */ /* Maximum number of virtual CPUs in legacy multi-processor guests. */ #define XEN_LEGACY_MAX_VCPUS 32 #ifndef __ASSEMBLY__ typedef unsigned long xen_ulong_t; #define PRI_xen_ulong "lx" /* * ` enum neg_errnoval * ` HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp); * ` * Sets the stack segment and pointer for the current vcpu. */ /* * ` enum neg_errnoval * ` HYPERVISOR_set_trap_table(const struct trap_info traps[]); * ` */ /* * Send an array of these to HYPERVISOR_set_trap_table(). * Terminate the array with a sentinel entry, with traps[].address==0. * The privilege level specifies which modes may enter a trap via a software * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate * privilege levels as follows: * Level == 0: Noone may enter * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter */ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) #define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl)) #define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2)) struct trap_info { uint8_t vector; /* exception vector */ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ uint16_t cs; /* code selector */ unsigned long address; /* code offset */ }; typedef struct trap_info trap_info_t; DEFINE_XEN_GUEST_HANDLE(trap_info_t); typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. 
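 *
 * (Illustrative aside, not part of the original header: each entry of the
 *  trap_ctxt table below uses the struct trap_info layout defined above, so a
 *  PV guest might describe one virtual IDT slot roughly as
 *
 *      struct trap_info ti = { 0 };
 *      ti.vector  = 14;                            -- e.g. page fault
 *      ti.cs      = FLAT_KERNEL_CS;
 *      ti.address = (unsigned long)do_page_fault;  -- hypothetical handler
 *      TI_SET_DPL(&ti, 1);                         -- Level 1: kernel may enter
 *
 *  and terminate the array passed to HYPERVISOR_set_trap_table() with an entry
 *  whose address is 0, as described above.)
 *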
Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. * * Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise * for HVM and PVH guests, not all information in this structure is updated: * * - For HVM guests, the structures read include: fpu_ctxt (if * VGCT_I387_VALID is set), flags, user_regs, debugreg[*] * * - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to * set cr3. All other fields not used should be set to 0. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ #define VGCF_I387_VALID (1<<0) #define VGCF_IN_KERNEL (1<<2) #define _VGCF_i387_valid 0 #define VGCF_i387_valid (1<<_VGCF_i387_valid) #define _VGCF_in_kernel 2 #define VGCF_in_kernel (1<<_VGCF_in_kernel) #define _VGCF_failsafe_disables_events 3 #define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events) #define _VGCF_syscall_disables_events 4 #define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events) #define _VGCF_online 5 #define VGCF_online (1<<_VGCF_online) unsigned long flags; /* VGCF_* flags */ struct cpu_user_regs user_regs; /* User-level CPU registers */ struct trap_info trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ #ifdef __i386__ unsigned long event_callback_cs; /* CS:EIP of event callback */ unsigned long event_callback_eip; unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; #else unsigned long event_callback_eip; unsigned long failsafe_callback_eip; #ifdef __XEN__ union { unsigned long syscall_callback_eip; struct { unsigned int event_callback_cs; /* compat CS of event cb */ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */ }; }; #else unsigned long syscall_callback_eip; #endif #endif unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ #ifdef __x86_64__ /* Segment base addresses. */ uint64_t fs_base; uint64_t gs_base_kernel; uint64_t gs_base_user; #endif }; typedef struct vcpu_guest_context vcpu_guest_context_t; DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t); struct arch_shared_info { unsigned long max_pfn; /* max pfn that appears in table */ /* Frame containing list of mfns containing list of mfns containing p2m. */ xen_pfn_t pfn_to_mfn_frame_list_list; unsigned long nmi_reason; uint64_t pad[32]; }; typedef struct arch_shared_info arch_shared_info_t; #endif /* !__ASSEMBLY__ */ /* * ` enum neg_errnoval * ` HYPERVISOR_fpu_taskswitch(int set); * ` * Sets (if set!=0) or clears (if set==0) CR0.TS. */ /* * ` enum neg_errnoval * ` HYPERVISOR_set_debugreg(int regno, unsigned long value); * * ` unsigned long * ` HYPERVISOR_get_debugreg(int regno); * For 0<=reg<=7, returns the debug register value. * For other values of reg, returns ((unsigned long)-EINVAL). * (Unfortunately, this interface is defective.) */ /* * Prefix forces emulation of some non-trapping instructions. * Currently only CPUID. 
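 *
 * For example (illustrative only, using the well-known 0x40000000 hypervisor
 * CPUID leaf), a guest can force the emulated form with the macro defined
 * below:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     asm volatile (XEN_CPUID
 *                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *                   : "0" (0x40000000));
 *
 * so the hypervisor, rather than the hardware, answers the query.
 *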
*/ #ifdef __ASSEMBLY__ #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; #define XEN_CPUID XEN_EMULATE_PREFIX cpuid #else #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" #endif #endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ arch-x86_32.h000066400000000000000000000024131314037446600323140ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * arch-x86_32.h * * Guest OS interface to x86 32-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" arch-x86_64.h000066400000000000000000000035461314037446600323310ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * arch-x86_64.h * * Guest OS interface to x86 64-bit Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004-2006, K A Fraser */ #include "arch-x86/xen.h" /* * ` enum neg_errnoval * ` HYPERVISOR_set_callbacks(unsigned long event_selector, * ` unsigned long event_address, * ` unsigned long failsafe_selector, * ` unsigned long failsafe_address); * ` * Register for callbacks on events. 
When an event (from an event * channel) occurs, event_address is used as the value of eip. * * A similar callback occurs if the segment selectors are invalid. * failsafe_address is used as the value of eip. * * On x86_64, event_selector and failsafe_selector are ignored (???). */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/blkif.h000066400000000000000000000305011314037446600316150ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include #include /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature-barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. 
Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. 
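 * (Illustrative aside, not part of the original header: the number of indirect
 *  grant pages a frontend needs for a request carrying nseg segments works out
 *  to
 *      nr_indirect_pages = (nseg + 511) / 512;
 *  i.e. ceil(nseg/512), since each page holds
 *  PAGE_SIZE/sizeof(struct blkif_request_segment_aligned) = 512 entries, as
 *  the rest of this comment explains.)
 *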
If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 /* Xen-defined major numbers for virtual disks, they look strangely * familiar */ #define XEN_IDE0_MAJOR 3 #define XEN_IDE1_MAJOR 22 #define XEN_SCSI_DISK0_MAJOR 8 #define XEN_SCSI_DISK1_MAJOR 65 #define XEN_SCSI_DISK2_MAJOR 66 #define XEN_SCSI_DISK3_MAJOR 67 #define XEN_SCSI_DISK4_MAJOR 68 #define XEN_SCSI_DISK5_MAJOR 69 #define XEN_SCSI_DISK6_MAJOR 70 #define XEN_SCSI_DISK7_MAJOR 71 #define XEN_SCSI_DISK8_MAJOR 128 #define XEN_SCSI_DISK9_MAJOR 129 #define XEN_SCSI_DISK10_MAJOR 130 #define XEN_SCSI_DISK11_MAJOR 131 #define XEN_SCSI_DISK12_MAJOR 132 #define XEN_SCSI_DISK13_MAJOR 133 #define XEN_SCSI_DISK14_MAJOR 134 #define XEN_SCSI_DISK15_MAJOR 135 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/callback.h000066400000000000000000000074531314037446600322740ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; typedef struct callback_register callback_register_t; DEFINE_XEN_GUEST_HANDLE(callback_register_t); /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. 
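 *
 * (Illustrative sketch of the corresponding registration, not part of the
 *  original header; the HYPERVISOR_callback_op() wrapper name is an assumption
 *  about the guest kernel:
 *
 *      struct callback_register cb = {
 *          .type    = CALLBACKTYPE_event,
 *          .flags   = 0,
 *          .address = ...arch-specific xen_callback_t for the event entry...,
 *      };
 *      HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
 *
 *  On x86_64 xen_callback_t is a plain code address; on x86_32 it is a cs/eip
 *  pair.)
 *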
*/ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; typedef struct callback_unregister callback_unregister_t; DEFINE_XEN_GUEST_HANDLE(callback_unregister_t); #if __XEN_INTERFACE_VERSION__ < 0x00030207 #undef CALLBACKTYPE_sysenter #define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated #endif #endif /* __XEN_PUBLIC_CALLBACK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/dom0_ops.h000066400000000000000000000077071314037446600322620ustar00rootroot00000000000000/****************************************************************************** * dom0_ops.h * * Process command requests from domain-0 guest OS. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOM0_OPS_H__ #define __XEN_PUBLIC_DOM0_OPS_H__ #include "xen.h" #include "platform.h" #if __XEN_INTERFACE_VERSION__ >= 0x00030204 #error "dom0_ops.h is a compatibility interface only" #endif #define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION #define DOM0_SETTIME XENPF_settime #define dom0_settime xenpf_settime #define dom0_settime_t xenpf_settime_t #define DOM0_ADD_MEMTYPE XENPF_add_memtype #define dom0_add_memtype xenpf_add_memtype #define dom0_add_memtype_t xenpf_add_memtype_t #define DOM0_DEL_MEMTYPE XENPF_del_memtype #define dom0_del_memtype xenpf_del_memtype #define dom0_del_memtype_t xenpf_del_memtype_t #define DOM0_READ_MEMTYPE XENPF_read_memtype #define dom0_read_memtype xenpf_read_memtype #define dom0_read_memtype_t xenpf_read_memtype_t #define DOM0_MICROCODE XENPF_microcode_update #define dom0_microcode xenpf_microcode_update #define dom0_microcode_t xenpf_microcode_update_t #define DOM0_PLATFORM_QUIRK XENPF_platform_quirk #define dom0_platform_quirk xenpf_platform_quirk #define dom0_platform_quirk_t xenpf_platform_quirk_t typedef uint64_t cpumap_t; /* Unsupported legacy operation -- defined for API compatibility. */ #define DOM0_MSR 15 struct dom0_msr { /* IN variables. */ uint32_t write; cpumap_t cpu_mask; uint32_t msr; uint32_t in1; uint32_t in2; /* OUT variables. */ uint32_t out1; uint32_t out2; }; typedef struct dom0_msr dom0_msr_t; DEFINE_XEN_GUEST_HANDLE(dom0_msr_t); /* Unsupported legacy operation -- defined for API compatibility. 
*/ #define DOM0_PHYSICAL_MEMORY_MAP 40 struct dom0_memory_map_entry { uint64_t start, end; uint32_t flags; /* reserved */ uint8_t is_ram; }; typedef struct dom0_memory_map_entry dom0_memory_map_entry_t; DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t); struct dom0_op { uint32_t cmd; uint32_t interface_version; /* DOM0_INTERFACE_VERSION */ union { struct dom0_msr msr; struct dom0_settime settime; struct dom0_add_memtype add_memtype; struct dom0_del_memtype del_memtype; struct dom0_read_memtype read_memtype; struct dom0_microcode microcode; struct dom0_platform_quirk platform_quirk; struct dom0_memory_map_entry physical_memory_map; uint8_t pad[128]; } u; }; typedef struct dom0_op dom0_op_t; DEFINE_XEN_GUEST_HANDLE(dom0_op_t); #endif /* __XEN_PUBLIC_DOM0_OPS_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/domctl.h000066400000000000000000001142211314037446600320120ustar00rootroot00000000000000/****************************************************************************** * domctl.h * * Domain management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2003, B Dragovic * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_DOMCTL_H__ #define __XEN_PUBLIC_DOMCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "domctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "grant_table.h" #include "hvm/save.h" #define XEN_DOMCTL_INTERFACE_VERSION 0x00000009 /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. * If it is specified as zero, an id is auto-allocated and returned. */ /* XEN_DOMCTL_createdomain */ struct xen_domctl_createdomain { /* IN parameters */ uint32_t ssidref; xen_domain_handle_t handle; /* Is this an HVM guest (as opposed to a PVH or PV guest)? */ #define _XEN_DOMCTL_CDF_hvm_guest 0 #define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest) /* Use hardware-assisted paging if available? */ #define _XEN_DOMCTL_CDF_hap 1 #define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap) /* Should domain memory integrity be verifed by tboot during Sx? */ #define _XEN_DOMCTL_CDF_s3_integrity 2 #define XEN_DOMCTL_CDF_s3_integrity (1U<<_XEN_DOMCTL_CDF_s3_integrity) /* Disable out-of-sync shadow page tables? 
*/ #define _XEN_DOMCTL_CDF_oos_off 3 #define XEN_DOMCTL_CDF_oos_off (1U<<_XEN_DOMCTL_CDF_oos_off) /* Is this a PVH guest (as opposed to an HVM or PV guest)? */ #define _XEN_DOMCTL_CDF_pvh_guest 4 #define XEN_DOMCTL_CDF_pvh_guest (1U<<_XEN_DOMCTL_CDF_pvh_guest) uint32_t flags; }; typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t); /* XEN_DOMCTL_getdomaininfo */ struct xen_domctl_getdomaininfo { /* OUT variables. */ domid_t domain; /* Also echoed in domctl.domain */ /* Domain is scheduled to die. */ #define _XEN_DOMINF_dying 0 #define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying) /* Domain is an HVM guest (as opposed to a PV guest). */ #define _XEN_DOMINF_hvm_guest 1 #define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest) /* The guest OS has shut down. */ #define _XEN_DOMINF_shutdown 2 #define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown) /* Currently paused by control software. */ #define _XEN_DOMINF_paused 3 #define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused) /* Currently blocked pending an event. */ #define _XEN_DOMINF_blocked 4 #define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked) /* Domain is currently running. */ #define _XEN_DOMINF_running 5 #define XEN_DOMINF_running (1U<<_XEN_DOMINF_running) /* Being debugged. */ #define _XEN_DOMINF_debugged 6 #define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged) /* domain is PVH */ #define _XEN_DOMINF_pvh_guest 7 #define XEN_DOMINF_pvh_guest (1U<<_XEN_DOMINF_pvh_guest) /* XEN_DOMINF_shutdown guest-supplied code. */ #define XEN_DOMINF_shutdownmask 255 #define XEN_DOMINF_shutdownshift 16 uint32_t flags; /* XEN_DOMINF_* */ uint64_aligned_t tot_pages; uint64_aligned_t max_pages; uint64_aligned_t outstanding_pages; uint64_aligned_t shr_pages; uint64_aligned_t paged_pages; uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */ uint64_aligned_t cpu_time; uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */ uint32_t ssidref; xen_domain_handle_t handle; uint32_t cpupool; }; typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t); /* XEN_DOMCTL_getmemlist */ struct xen_domctl_getmemlist { /* IN variables. */ /* Max entries to write to output buffer. */ uint64_aligned_t max_pfns; /* Start index in guest's page list. */ uint64_aligned_t start_pfn; XEN_GUEST_HANDLE_64(uint64) buffer; /* OUT variables. */ uint64_aligned_t num_pfns; }; typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t); /* XEN_DOMCTL_getpageframeinfo */ #define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28 #define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28) #define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28) #define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28) #define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28) #define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28) #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28) #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31) #define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */ #define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */ #define XEN_DOMCTL_PFINFO_BROKEN (0xdU<<28) /* broken page */ #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28) struct xen_domctl_getpageframeinfo { /* IN variables. */ uint64_aligned_t gmfn; /* GMFN to query */ /* OUT variables. */ /* Is the page PINNED to a type? 
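 * (Illustrative aside: a caller usually decodes the returned value with the
 *  masks above, e.g.
 *      pinned   = !!(info.type & XEN_DOMCTL_PFINFO_LPINTAB);
 *      tab_type = info.type & XEN_DOMCTL_PFINFO_LTABTYPE_MASK;  (L1..L4 or NOTAB)
 *  where "info" is a hypothetical struct xen_domctl_getpageframeinfo.)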
*/ uint32_t type; /* see above type defs */ }; typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t); /* XEN_DOMCTL_getpageframeinfo2 */ struct xen_domctl_getpageframeinfo2 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(uint32) array; }; typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t); /* XEN_DOMCTL_getpageframeinfo3 */ struct xen_domctl_getpageframeinfo3 { /* IN variables. */ uint64_aligned_t num; /* IN/OUT variables. */ XEN_GUEST_HANDLE_64(xen_pfn_t) array; }; /* * Control shadow pagetables operation */ /* XEN_DOMCTL_shadow_op */ /* Disable shadow mode. */ #define XEN_DOMCTL_SHADOW_OP_OFF 0 /* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */ #define XEN_DOMCTL_SHADOW_OP_ENABLE 32 /* Log-dirty bitmap operations. */ /* Return the bitmap and clean internal copy for next round. */ #define XEN_DOMCTL_SHADOW_OP_CLEAN 11 /* Return the bitmap but do not modify internal copy. */ #define XEN_DOMCTL_SHADOW_OP_PEEK 12 /* Memory allocation accessors. */ #define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30 #define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31 /* Legacy enable operations. */ /* Equiv. to ENABLE with no mode flags. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */ #define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3 /* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */ /* * Shadow pagetables are refcounted: guest does not use explicit mmu * operations nor write-protect its pagetables. */ #define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1) /* * Log pages in a bitmap as they are dirtied. * Used for live relocation to determine which pages must be re-sent. */ #define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2) /* * Automatically translate GPFNs into MFNs. */ #define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3) /* * Xen does not steal virtual address space from the guest. * Requires HVM support. */ #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4) struct xen_domctl_shadow_op_stats { uint32_t fault_count; uint32_t dirty_count; }; typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t); struct xen_domctl_shadow_op { /* IN variables. */ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */ /* OP_ENABLE */ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */ uint32_t mb; /* Shadow memory allocation in MB */ /* OP_PEEK / OP_CLEAN */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */ struct xen_domctl_shadow_op_stats stats; }; typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t); /* XEN_DOMCTL_max_mem */ struct xen_domctl_max_mem { /* IN variables. 
*/ uint64_aligned_t max_memkb; }; typedef struct xen_domctl_max_mem xen_domctl_max_mem_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t); /* XEN_DOMCTL_setvcpucontext */ /* XEN_DOMCTL_getvcpucontext */ struct xen_domctl_vcpucontext { uint32_t vcpu; /* IN */ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */ }; typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t); /* XEN_DOMCTL_getvcpuinfo */ struct xen_domctl_getvcpuinfo { /* IN variables. */ uint32_t vcpu; /* OUT variables. */ uint8_t online; /* currently online (not hotplugged)? */ uint8_t blocked; /* blocked waiting for an event? */ uint8_t running; /* currently scheduled on its CPU? */ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */ uint32_t cpu; /* current mapping */ }; typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t); /* Get/set the NUMA node(s) with which the guest has affinity with. */ /* XEN_DOMCTL_setnodeaffinity */ /* XEN_DOMCTL_getnodeaffinity */ struct xen_domctl_nodeaffinity { struct xenctl_bitmap nodemap;/* IN */ }; typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t); /* Get/set which physical cpus a vcpu can execute on. */ /* XEN_DOMCTL_setvcpuaffinity */ /* XEN_DOMCTL_getvcpuaffinity */ struct xen_domctl_vcpuaffinity { uint32_t vcpu; /* IN */ struct xenctl_bitmap cpumap; /* IN/OUT */ }; typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t); /* XEN_DOMCTL_max_vcpus */ struct xen_domctl_max_vcpus { uint32_t max; /* maximum number of vcpus */ }; typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t); /* XEN_DOMCTL_scheduler_op */ /* Scheduler types. */ #define XEN_SCHEDULER_SEDF 4 #define XEN_SCHEDULER_CREDIT 5 #define XEN_SCHEDULER_CREDIT2 6 #define XEN_SCHEDULER_ARINC653 7 /* Set or get info? 
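 * (Illustrative aside, assuming a hypothetical struct xen_domctl "op": to
 *  read the credit-scheduler parameters of a domain one would set
 *      op.cmd = XEN_DOMCTL_scheduler_op;
 *      op.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
 *      op.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
 *  and read op.u.scheduler_op.u.credit.weight and .cap back on return.)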
*/ #define XEN_DOMCTL_SCHEDOP_putinfo 0 #define XEN_DOMCTL_SCHEDOP_getinfo 1 struct xen_domctl_scheduler_op { uint32_t sched_id; /* XEN_SCHEDULER_* */ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */ union { struct xen_domctl_sched_sedf { uint64_aligned_t period; uint64_aligned_t slice; uint64_aligned_t latency; uint32_t extratime; uint32_t weight; } sedf; struct xen_domctl_sched_credit { uint16_t weight; uint16_t cap; } credit; struct xen_domctl_sched_credit2 { uint16_t weight; } credit2; } u; }; typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t); /* XEN_DOMCTL_setdomainhandle */ struct xen_domctl_setdomainhandle { xen_domain_handle_t handle; }; typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t); /* XEN_DOMCTL_setdebugging */ struct xen_domctl_setdebugging { uint8_t enable; }; typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t); /* XEN_DOMCTL_irq_permission */ struct xen_domctl_irq_permission { uint8_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ }; typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t); /* XEN_DOMCTL_iomem_permission */ struct xen_domctl_iomem_permission { uint64_aligned_t first_mfn;/* first page (physical page number) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */ }; typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t); /* XEN_DOMCTL_ioport_permission */ struct xen_domctl_ioport_permission { uint32_t first_port; /* first port int range */ uint32_t nr_ports; /* size of port range */ uint8_t allow_access; /* allow or deny access to range? */ }; typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t); /* XEN_DOMCTL_hypercall_init */ struct xen_domctl_hypercall_init { uint64_aligned_t gmfn; /* GMFN to be initialised */ }; typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t); /* XEN_DOMCTL_arch_setup */ #define _XEN_DOMAINSETUP_hvm_guest 0 #define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest) #define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */ #define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query) #define _XEN_DOMAINSETUP_sioemu_guest 2 #define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest) typedef struct xen_domctl_arch_setup { uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */ #ifdef __ia64__ uint64_aligned_t bp; /* mpaddr of boot param area */ uint64_aligned_t maxmem; /* Highest memory address for MDT. */ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ int8_t vhpt_size_log2; /* Log2 of VHPT size. 
*/ #endif } xen_domctl_arch_setup_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t); /* XEN_DOMCTL_settimeoffset */ struct xen_domctl_settimeoffset { int32_t time_offset_seconds; /* applied to domain wallclock time */ }; typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t); /* XEN_DOMCTL_gethvmcontext */ /* XEN_DOMCTL_sethvmcontext */ typedef struct xen_domctl_hvmcontext { uint32_t size; /* IN/OUT: size of buffer / bytes filled */ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call * gethvmcontext with NULL * buffer to get size req'd */ } xen_domctl_hvmcontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t); /* XEN_DOMCTL_set_address_size */ /* XEN_DOMCTL_get_address_size */ typedef struct xen_domctl_address_size { uint32_t size; } xen_domctl_address_size_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t); /* XEN_DOMCTL_real_mode_area */ struct xen_domctl_real_mode_area { uint32_t log; /* log2 of Real Mode Area size */ }; typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t); /* XEN_DOMCTL_sendtrigger */ #define XEN_DOMCTL_SENDTRIGGER_NMI 0 #define XEN_DOMCTL_SENDTRIGGER_RESET 1 #define XEN_DOMCTL_SENDTRIGGER_INIT 2 #define XEN_DOMCTL_SENDTRIGGER_POWER 3 #define XEN_DOMCTL_SENDTRIGGER_SLEEP 4 struct xen_domctl_sendtrigger { uint32_t trigger; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t); /* Assign PCI device to HVM guest. Sets up IOMMU structures. */ /* XEN_DOMCTL_assign_device */ /* XEN_DOMCTL_test_assign_device */ /* XEN_DOMCTL_deassign_device */ struct xen_domctl_assign_device { uint32_t machine_sbdf; /* machine PCI ID of assigned device */ }; typedef struct xen_domctl_assign_device xen_domctl_assign_device_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t); /* Retrieve sibling devices infomation of machine_sbdf */ /* XEN_DOMCTL_get_device_group */ struct xen_domctl_get_device_group { uint32_t machine_sbdf; /* IN */ uint32_t max_sdevs; /* IN */ uint32_t num_sdevs; /* OUT */ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */ }; typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t); /* Pass-through interrupts: bind real irq -> hvm devfn. */ /* XEN_DOMCTL_bind_pt_irq */ /* XEN_DOMCTL_unbind_pt_irq */ typedef enum pt_irq_type_e { PT_IRQ_TYPE_PCI, PT_IRQ_TYPE_ISA, PT_IRQ_TYPE_MSI, PT_IRQ_TYPE_MSI_TRANSLATE, } pt_irq_type_t; struct xen_domctl_bind_pt_irq { uint32_t machine_irq; pt_irq_type_t irq_type; uint32_t hvm_domid; union { struct { uint8_t isa_irq; } isa; struct { uint8_t bus; uint8_t device; uint8_t intx; } pci; struct { uint8_t gvec; uint32_t gflags; uint64_aligned_t gtable; } msi; } u; }; typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t); /* Bind machine I/O address range -> HVM address range. 
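 * (Illustrative aside: for PCI passthrough a toolstack typically maps a
 *  device BAR into the guest by filling the structure below with
 *  first_gfn = guest frame, first_mfn = machine frame, nr_mfns = size and
 *  add_mapping = DPCI_ADD_MAPPING, and undoes it later with
 *  add_mapping = DPCI_REMOVE_MAPPING.)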
*/ /* XEN_DOMCTL_memory_mapping */ #define DPCI_ADD_MAPPING 1 #define DPCI_REMOVE_MAPPING 0 struct xen_domctl_memory_mapping { uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */ uint64_aligned_t first_mfn; /* first page (machine page) in range */ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */ uint32_t add_mapping; /* add or remove mapping */ uint32_t padding; /* padding for 64-bit aligned structure */ }; typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t); /* Bind machine I/O port range -> HVM I/O port range. */ /* XEN_DOMCTL_ioport_mapping */ struct xen_domctl_ioport_mapping { uint32_t first_gport; /* first guest IO port*/ uint32_t first_mport; /* first machine IO port */ uint32_t nr_ports; /* size of port range */ uint32_t add_mapping; /* add or remove mapping */ }; typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t); /* * Pin caching type of RAM space for x86 HVM domU. */ /* XEN_DOMCTL_pin_mem_cacheattr */ /* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */ #define XEN_DOMCTL_MEM_CACHEATTR_UC 0 #define XEN_DOMCTL_MEM_CACHEATTR_WC 1 #define XEN_DOMCTL_MEM_CACHEATTR_WT 4 #define XEN_DOMCTL_MEM_CACHEATTR_WP 5 #define XEN_DOMCTL_MEM_CACHEATTR_WB 6 #define XEN_DOMCTL_MEM_CACHEATTR_UCM 7 struct xen_domctl_pin_mem_cacheattr { uint64_aligned_t start, end; uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */ }; typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t); /* XEN_DOMCTL_set_ext_vcpucontext */ /* XEN_DOMCTL_get_ext_vcpucontext */ struct xen_domctl_ext_vcpucontext { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: Size of struct (IN) * GET: Size of struct (OUT, up to 128 bytes) */ uint32_t size; #if defined(__i386__) || defined(__x86_64__) /* SYSCALL from 32-bit mode and SYSENTER callback information. */ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */ uint64_aligned_t syscall32_callback_eip; uint64_aligned_t sysenter_callback_eip; uint16_t syscall32_callback_cs; uint16_t sysenter_callback_cs; uint8_t syscall32_disables_events; uint8_t sysenter_disables_events; #if defined(__GNUC__) union { uint64_aligned_t mcg_cap; struct hvm_vmce_vcpu vmce; }; #else struct hvm_vmce_vcpu vmce; #endif #endif }; typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t); /* * Set optimizaton features for a domain */ /* XEN_DOMCTL_set_opt_feature */ struct xen_domctl_set_opt_feature { #if defined(__ia64__) struct xen_ia64_opt_feature optf; #else /* Make struct non-empty: do not depend on this field name! 
*/ uint64_t dummy; #endif }; typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t); /* * Set the target domain for a domain */ /* XEN_DOMCTL_set_target */ struct xen_domctl_set_target { domid_t target; }; typedef struct xen_domctl_set_target xen_domctl_set_target_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t); #if defined(__i386__) || defined(__x86_64__) # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF /* XEN_DOMCTL_set_cpuid */ struct xen_domctl_cpuid { uint32_t input[2]; uint32_t eax; uint32_t ebx; uint32_t ecx; uint32_t edx; }; typedef struct xen_domctl_cpuid xen_domctl_cpuid_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t); #endif /* * Arranges that if the domain suspends (specifically, if it shuts * down with code SHUTDOWN_suspend), this event channel will be * notified. * * This is _instead of_ the usual notification to the global * VIRQ_DOM_EXC. (In most systems that pirq is owned by xenstored.) * * Only one subscription per domain is possible. Last subscriber * wins; others are silently displaced. * * NB that contrary to the rather general name, it only applies to * domain shutdown with code suspend. Shutdown for other reasons * (including crash), and domain death, are notified to VIRQ_DOM_EXC * regardless. */ /* XEN_DOMCTL_subscribe */ struct xen_domctl_subscribe { uint32_t port; /* IN */ }; typedef struct xen_domctl_subscribe xen_domctl_subscribe_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t); /* * Define the maximum machine address size which should be allocated * to a guest. */ /* XEN_DOMCTL_set_machine_address_size */ /* XEN_DOMCTL_get_machine_address_size */ /* * Do not inject spurious page faults into this domain. */ /* XEN_DOMCTL_suppress_spurious_page_faults */ /* XEN_DOMCTL_debug_op */ #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF 0 #define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON 1 struct xen_domctl_debug_op { uint32_t op; /* IN */ uint32_t vcpu; /* IN */ }; typedef struct xen_domctl_debug_op xen_domctl_debug_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t); /* * Request a particular record from the HVM context */ /* XEN_DOMCTL_gethvmcontext_partial */ typedef struct xen_domctl_hvmcontext_partial { uint32_t type; /* IN: Type of record required */ uint32_t instance; /* IN: Instance of that type */ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ } xen_domctl_hvmcontext_partial_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t); /* XEN_DOMCTL_disable_migrate */ typedef struct xen_domctl_disable_migrate { uint32_t disable; /* IN: 1: disable migration and restore */ } xen_domctl_disable_migrate_t; /* XEN_DOMCTL_gettscinfo */ /* XEN_DOMCTL_settscinfo */ struct xen_guest_tsc_info { uint32_t tsc_mode; uint32_t gtsc_khz; uint32_t incarnation; uint32_t pad; uint64_aligned_t elapsed_nsec; }; typedef struct xen_guest_tsc_info xen_guest_tsc_info_t; DEFINE_XEN_GUEST_HANDLE(xen_guest_tsc_info_t); typedef struct xen_domctl_tsc_info { XEN_GUEST_HANDLE_64(xen_guest_tsc_info_t) out_info; /* OUT */ xen_guest_tsc_info_t info; /* IN */ } xen_domctl_tsc_info_t; /* XEN_DOMCTL_gdbsx_guestmemio guest mem io */ struct xen_domctl_gdbsx_memio { /* IN */ uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */ uint64_aligned_t gva; /* guest virtual address */ uint64_aligned_t uva; /* user buffer virtual address */ uint32_t len; /* number of bytes to read/write */ uint8_t gwr; /* 0 = read from guest. 
1 = write to guest */ /* OUT */ uint32_t remain; /* bytes remaining to be copied */ }; /* XEN_DOMCTL_gdbsx_pausevcpu */ /* XEN_DOMCTL_gdbsx_unpausevcpu */ struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */ uint32_t vcpu; /* which vcpu */ }; /* XEN_DOMCTL_gdbsx_domstatus */ struct xen_domctl_gdbsx_domstatus { /* OUT */ uint8_t paused; /* is the domain paused */ uint32_t vcpu_id; /* any vcpu in an event? */ uint32_t vcpu_ev; /* if yes, what event? */ }; /* * Memory event operations */ /* XEN_DOMCTL_mem_event_op */ /* * Domain memory paging * Page memory in and out. * Domctl interface to set up and tear down the * pager<->hypervisor interface. Use XENMEM_paging_op* * to perform per-page operations. * * The XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE domctl returns several * non-standard error codes to indicate why paging could not be enabled: * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest * EMLINK - guest has iommu passthrough enabled * EXDEV - guest has PoD enabled * EBUSY - guest has or had paging enabled, ring buffer still active */ #define XEN_DOMCTL_MEM_EVENT_OP_PAGING 1 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE 0 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE 1 /* * Access permissions. * * As with paging, use the domctl for teardown/setup of the * helper<->hypervisor interface. * * There are HVM hypercalls to set the per-page access permissions of every * page in a domain. When one of these permissions--independent, read, * write, and execute--is violated, the VCPU is paused and a memory event * is sent with what happened. (See public/mem_event.h) . * * The memory event handler can then resume the VCPU and redo the access * with a XENMEM_access_op_resume hypercall. * * The XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE domctl returns several * non-standard error codes to indicate why access could not be enabled: * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest * EBUSY - guest has or had access enabled, ring buffer still active */ #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS 2 #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE 0 #define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE 1 /* * Sharing ENOMEM helper. * * As with paging, use the domctl for teardown/setup of the * helper<->hypervisor interface. * * If setup, this ring is used to communicate failed allocations * in the unshare path. XENMEM_sharing_op_resume is used to wake up * vcpus that could not unshare. * * Note that shring can be turned on (as per the domctl below) * *without* this ring being setup. */ #define XEN_DOMCTL_MEM_EVENT_OP_SHARING 3 #define XEN_DOMCTL_MEM_EVENT_OP_SHARING_ENABLE 0 #define XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE 1 /* Use for teardown/setup of helper<->hypervisor interface for paging, * access and sharing.*/ struct xen_domctl_mem_event_op { uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */ uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */ uint32_t port; /* OUT: event channel for ring */ }; typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t); /* * Memory sharing operations */ /* XEN_DOMCTL_mem_sharing_op. * The CONTROL sub-domctl is used for bringup/teardown. 
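 * (Illustrative aside: enabling sharing for a domain is then simply
 *      op.u.mem_sharing_op.op = XEN_DOMCTL_MEM_SHARING_CONTROL;
 *      op.u.mem_sharing_op.u.enable = 1;
 *  in a hypothetical struct xen_domctl "op" whose cmd field is set to
 *  XEN_DOMCTL_mem_sharing_op.)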
*/ #define XEN_DOMCTL_MEM_SHARING_CONTROL 0 struct xen_domctl_mem_sharing_op { uint8_t op; /* XEN_DOMCTL_MEM_SHARING_* */ union { uint8_t enable; /* CONTROL */ } u; }; typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t); struct xen_domctl_audit_p2m { /* OUT error counts */ uint64_t orphans; uint64_t m2p_bad; uint64_t p2m_bad; }; typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t); struct xen_domctl_set_virq_handler { uint32_t virq; /* IN */ }; typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t); #if defined(__i386__) || defined(__x86_64__) /* XEN_DOMCTL_setvcpuextstate */ /* XEN_DOMCTL_getvcpuextstate */ struct xen_domctl_vcpuextstate { /* IN: VCPU that this call applies to. */ uint32_t vcpu; /* * SET: xfeature support mask of struct (IN) * GET: xfeature support mask of struct (IN/OUT) * xfeature mask is served as identifications of the saving format * so that compatible CPUs can have a check on format to decide * whether it can restore. */ uint64_aligned_t xfeature_mask; /* * SET: Size of struct (IN) * GET: Size of struct (IN/OUT) */ uint64_aligned_t size; XEN_GUEST_HANDLE_64(uint64) buffer; }; typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t); #endif /* XEN_DOMCTL_set_access_required: sets whether a memory event listener * must be present to handle page access events: if false, the page * access will revert to full permissions if no one is listening; * */ struct xen_domctl_set_access_required { uint8_t access_required; }; typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t); struct xen_domctl_set_broken_page_p2m { uint64_aligned_t pfn; }; typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t); /* * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port * number the guest may use. Use this limit the amount of resources * (global mapping space, xenheap) a guest may use for event channels. */ struct xen_domctl_set_max_evtchn { uint32_t max_port; }; typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t); /* * ARM: Clean and invalidate caches associated with given region of * guest memory. */ struct xen_domctl_cacheflush { /* IN: page range to flush. 
*/ xen_pfn_t start_pfn, nr_pfns; }; typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t); struct xen_domctl { uint32_t cmd; #define XEN_DOMCTL_createdomain 1 #define XEN_DOMCTL_destroydomain 2 #define XEN_DOMCTL_pausedomain 3 #define XEN_DOMCTL_unpausedomain 4 #define XEN_DOMCTL_getdomaininfo 5 #define XEN_DOMCTL_getmemlist 6 #define XEN_DOMCTL_getpageframeinfo 7 #define XEN_DOMCTL_getpageframeinfo2 8 #define XEN_DOMCTL_setvcpuaffinity 9 #define XEN_DOMCTL_shadow_op 10 #define XEN_DOMCTL_max_mem 11 #define XEN_DOMCTL_setvcpucontext 12 #define XEN_DOMCTL_getvcpucontext 13 #define XEN_DOMCTL_getvcpuinfo 14 #define XEN_DOMCTL_max_vcpus 15 #define XEN_DOMCTL_scheduler_op 16 #define XEN_DOMCTL_setdomainhandle 17 #define XEN_DOMCTL_setdebugging 18 #define XEN_DOMCTL_irq_permission 19 #define XEN_DOMCTL_iomem_permission 20 #define XEN_DOMCTL_ioport_permission 21 #define XEN_DOMCTL_hypercall_init 22 #define XEN_DOMCTL_arch_setup 23 #define XEN_DOMCTL_settimeoffset 24 #define XEN_DOMCTL_getvcpuaffinity 25 #define XEN_DOMCTL_real_mode_area 26 #define XEN_DOMCTL_resumedomain 27 #define XEN_DOMCTL_sendtrigger 28 #define XEN_DOMCTL_subscribe 29 #define XEN_DOMCTL_gethvmcontext 33 #define XEN_DOMCTL_sethvmcontext 34 #define XEN_DOMCTL_set_address_size 35 #define XEN_DOMCTL_get_address_size 36 #define XEN_DOMCTL_assign_device 37 #define XEN_DOMCTL_bind_pt_irq 38 #define XEN_DOMCTL_memory_mapping 39 #define XEN_DOMCTL_ioport_mapping 40 #define XEN_DOMCTL_pin_mem_cacheattr 41 #define XEN_DOMCTL_set_ext_vcpucontext 42 #define XEN_DOMCTL_get_ext_vcpucontext 43 #define XEN_DOMCTL_set_opt_feature 44 #define XEN_DOMCTL_test_assign_device 45 #define XEN_DOMCTL_set_target 46 #define XEN_DOMCTL_deassign_device 47 #define XEN_DOMCTL_unbind_pt_irq 48 #define XEN_DOMCTL_set_cpuid 49 #define XEN_DOMCTL_get_device_group 50 #define XEN_DOMCTL_set_machine_address_size 51 #define XEN_DOMCTL_get_machine_address_size 52 #define XEN_DOMCTL_suppress_spurious_page_faults 53 #define XEN_DOMCTL_debug_op 54 #define XEN_DOMCTL_gethvmcontext_partial 55 #define XEN_DOMCTL_mem_event_op 56 #define XEN_DOMCTL_mem_sharing_op 57 #define XEN_DOMCTL_disable_migrate 58 #define XEN_DOMCTL_gettscinfo 59 #define XEN_DOMCTL_settscinfo 60 #define XEN_DOMCTL_getpageframeinfo3 61 #define XEN_DOMCTL_setvcpuextstate 62 #define XEN_DOMCTL_getvcpuextstate 63 #define XEN_DOMCTL_set_access_required 64 #define XEN_DOMCTL_audit_p2m 65 #define XEN_DOMCTL_set_virq_handler 66 #define XEN_DOMCTL_set_broken_page_p2m 67 #define XEN_DOMCTL_setnodeaffinity 68 #define XEN_DOMCTL_getnodeaffinity 69 #define XEN_DOMCTL_set_max_evtchn 70 #define XEN_DOMCTL_cacheflush 71 #define XEN_DOMCTL_gdbsx_guestmemio 1000 #define XEN_DOMCTL_gdbsx_pausevcpu 1001 #define XEN_DOMCTL_gdbsx_unpausevcpu 1002 #define XEN_DOMCTL_gdbsx_domstatus 1003 uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; struct xen_domctl_getmemlist getmemlist; struct xen_domctl_getpageframeinfo getpageframeinfo; struct xen_domctl_getpageframeinfo2 getpageframeinfo2; struct xen_domctl_getpageframeinfo3 getpageframeinfo3; struct xen_domctl_nodeaffinity nodeaffinity; struct xen_domctl_vcpuaffinity vcpuaffinity; struct xen_domctl_shadow_op shadow_op; struct xen_domctl_max_mem max_mem; struct xen_domctl_vcpucontext vcpucontext; struct xen_domctl_getvcpuinfo getvcpuinfo; struct xen_domctl_max_vcpus max_vcpus; struct 
xen_domctl_scheduler_op scheduler_op; struct xen_domctl_setdomainhandle setdomainhandle; struct xen_domctl_setdebugging setdebugging; struct xen_domctl_irq_permission irq_permission; struct xen_domctl_iomem_permission iomem_permission; struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_arch_setup arch_setup; struct xen_domctl_settimeoffset settimeoffset; struct xen_domctl_disable_migrate disable_migrate; struct xen_domctl_tsc_info tsc_info; struct xen_domctl_real_mode_area real_mode_area; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; struct xen_domctl_address_size address_size; struct xen_domctl_sendtrigger sendtrigger; struct xen_domctl_get_device_group get_device_group; struct xen_domctl_assign_device assign_device; struct xen_domctl_bind_pt_irq bind_pt_irq; struct xen_domctl_memory_mapping memory_mapping; struct xen_domctl_ioport_mapping ioport_mapping; struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr; struct xen_domctl_ext_vcpucontext ext_vcpucontext; struct xen_domctl_set_opt_feature set_opt_feature; struct xen_domctl_set_target set_target; struct xen_domctl_subscribe subscribe; struct xen_domctl_debug_op debug_op; struct xen_domctl_mem_event_op mem_event_op; struct xen_domctl_mem_sharing_op mem_sharing_op; #if defined(__i386__) || defined(__x86_64__) struct xen_domctl_cpuid cpuid; struct xen_domctl_vcpuextstate vcpuextstate; #endif struct xen_domctl_set_access_required access_required; struct xen_domctl_audit_p2m audit_p2m; struct xen_domctl_set_virq_handler set_virq_handler; struct xen_domctl_set_max_evtchn set_max_evtchn; struct xen_domctl_gdbsx_memio gdbsx_guest_memio; struct xen_domctl_set_broken_page_p2m set_broken_page_p2m; struct xen_domctl_cacheflush cacheflush; struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu; struct xen_domctl_gdbsx_domstatus gdbsx_domstatus; uint8_t pad[128]; } u; }; typedef struct xen_domctl xen_domctl_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_t); #endif /* __XEN_PUBLIC_DOMCTL_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/elfnote.h000066400000000000000000000201371314037446600321660ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * `incontents 200 elfnotes ELF notes * * The notes should live in a PT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. * * String values (for non-legacy) are NULL terminated ASCII, also known * as ASCIZ type. */ /* * NAME=VALUE pair (string). */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the actual required * pseudo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes", "no" or * "bimodal"). * * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting * may be given as "yes,bimodal" which will cause older Xen to treat * this kernel as PAE. * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). * * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. 
Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. * This is a numeric value. * * Default is 0 */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 /* * The (non-default) location the initial phys-to-machine map should be * placed at by the hypervisor (Dom0) or the tools (DomU). * The kernel must be prepared for this mapping to be established using * large pages, despite such otherwise not being available to guests. * The kernel must also be able to handle the page table pages used for * this mapping not being accessible through the initial mapping. * (Only x86-64 supports this at present.) */ #define XEN_ELFNOTE_INIT_P2M 15 /* * Whether or not the guest can deal with being passed an initrd not * mapped through its initial page tables. */ #define XEN_ELFNOTE_MOD_START_PFN 16 /* * The features supported by this kernel (numeric). * * Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a * kernel to specify support for features that older hypervisors don't * know about. The set of features 4.2 and newer hypervisors will * consider supported by the kernel is the combination of the sets * specified through this and the string note. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_SUPPORTED_FEATURES 17 /* * The number of the highest elfnote defined. */ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * xen dump-core none note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE * in its dump file to indicate that the file is xen dump-core * file. This note doesn't have any other information. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 /* * xen dump-core header note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER * in its dump file. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 /* * xen dump-core xen version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION * in its dump file. It contains the xen version obtained via the * XENVER hypercall. * See tools/libxc/xc_core.h for more information. */ #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 /* * xen dump-core format version note. * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION * in its dump file. It contains a format version identifier. * See tools/libxc/xc_core.h for more information. 
*/ #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ event_channel.h000066400000000000000000000274331314037446600332720ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * event_channel.h * * Event channels between domains. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include /* * `incontents 150 evtchn Event Channels * * Event channels are the basic primitive provided by Xen for event * notifications. An event is the Xen equivalent of a hardware * interrupt. They essentially store one bit of information, the event * of interest is signalled by transitioning this bit from 0 to 1. * * Notifications are received by a guest via an upcall from Xen, * indicating when an event arrives (setting the bit). Further * notifications are masked until the bit is cleared again (therefore, * guests must check the value of the bit after re-enabling event * delivery to ensure no missed notifications). * * Event notifications can be masked by setting a flag; this is * equivalent to disabling interrupts and can be used to ensure * atomicity of certain operations in the guest kernel. * * Event channels are represented by the evtchn_* fields in * struct shared_info and struct vcpu_info. */ /* * ` enum neg_errnoval * ` HYPERVISOR_event_channel_op(enum event_channel_op cmd, void *args) * ` * @cmd == EVTCHNOP_* (event-channel operation). * @args == struct evtchn_* Operation-specific extra arguments (NULL if none). */ /* ` enum event_channel_op { // EVTCHNOP_* => struct evtchn_* */ #define EVTCHNOP_bind_interdomain 0 #define EVTCHNOP_bind_virq 1 #define EVTCHNOP_bind_pirq 2 #define EVTCHNOP_close 3 #define EVTCHNOP_send 4 #define EVTCHNOP_status 5 #define EVTCHNOP_alloc_unbound 6 #define EVTCHNOP_bind_ipi 7 #define EVTCHNOP_bind_vcpu 8 #define EVTCHNOP_unmask 9 #define EVTCHNOP_reset 10 #define EVTCHNOP_init_control 11 #define EVTCHNOP_expand_array 12 #define EVTCHNOP_set_priority 13 /* ` } */ typedef uint32_t evtchn_port_t; DEFINE_XEN_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . 
A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * * In case the peer domain has already tried to set our event channel * pending, before it was bound, EVTCHNOP_bind_interdomain always sets * the local event channel pending. * * The usual pattern of use, in the guest's upcall (or subsequent * handler) is as follows: (Re-enable the event channel for subsequent * signalling and then) check for the existence of whatever condition * is being waited for by other means, and take whatever action is * needed (if any). * * NOTES: * 1. may be DOMID_SELF, allowing loopback connections. */ struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list * in xen.h for the classification of each VIRQ. * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be * re-bound via EVTCHNOP_bind_vcpu. * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. * The allocated event channel is bound to the specified vcpu and the * binding cannot be changed. */ struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; /* enum virq */ uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_virq evtchn_bind_virq_t; /* * EVTCHNOP_bind_pirq: Bind a local event channel to a real IRQ (PIRQ ). * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ struct evtchn_bind_pirq { /* IN parameters. */ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_pirq evtchn_bind_pirq_t; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; typedef struct evtchn_bind_ipi evtchn_bind_ipi_t; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_close evtchn_close_t; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_send evtchn_send_t; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. 
Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; typedef struct evtchn_status evtchn_status_t; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI-bound channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. * This binding cannot be changed. * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ struct evtchn_unmask { /* IN parameters. */ evtchn_port_t port; }; typedef struct evtchn_unmask evtchn_unmask_t; /* * EVTCHNOP_reset: Close all event channels associated with specified domain. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. */ struct evtchn_reset { /* IN parameters. */ domid_t dom; }; typedef struct evtchn_reset evtchn_reset_t; /* * EVTCHNOP_init_control: initialize the control block for the FIFO ABI. * * Note: any events that are currently pending will not be resent and * will be lost. Guests should call this before binding any event to * avoid losing any events. */ struct evtchn_init_control { /* IN parameters. */ uint64_t control_gfn; uint32_t offset; uint32_t vcpu; /* OUT parameters. */ uint8_t link_bits; uint8_t _pad[7]; }; typedef struct evtchn_init_control evtchn_init_control_t; /* * EVTCHNOP_expand_array: add an additional page to the event array. */ struct evtchn_expand_array { /* IN parameters. */ uint64_t array_gfn; }; typedef struct evtchn_expand_array evtchn_expand_array_t; /* * EVTCHNOP_set_priority: set the priority for an event channel. */ struct evtchn_set_priority { /* IN parameters. */ uint32_t port; uint32_t priority; }; typedef struct evtchn_set_priority evtchn_set_priority_t; /* * ` enum neg_errnoval * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op) * ` * Superceded by new event_channel_op() hypercall since 0x00030202. 
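 * (Illustrative aside, assuming the usual HYPERVISOR_event_channel_op()
 *  wrapper is declared elsewhere: allocating an unbound port for a peer is
 *      struct evtchn_alloc_unbound alloc = { .dom = DOMID_SELF,
 *                                            .remote_dom = remote };
 *      rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
 *  with alloc.port holding the new local port on success, and notifying a
 *  bound port is
 *      struct evtchn_send send = { .port = port };
 *      rc = HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
 *  where "remote", "port" and "rc" are the caller's own variables.)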
*/ struct evtchn_op { uint32_t cmd; /* enum event_channel_op */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); /* * 2-level ABI */ #define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64) /* * FIFO ABI */ /* Events may have priorities from 0 (highest) to 15 (lowest). */ #define EVTCHN_FIFO_PRIORITY_MAX 0 #define EVTCHN_FIFO_PRIORITY_DEFAULT 7 #define EVTCHN_FIFO_PRIORITY_MIN 15 #define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1) typedef uint32_t event_word_t; #define EVTCHN_FIFO_PENDING 31 #define EVTCHN_FIFO_MASKED 30 #define EVTCHN_FIFO_LINKED 29 #define EVTCHN_FIFO_BUSY 28 #define EVTCHN_FIFO_LINK_BITS 17 #define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1) #define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS) struct evtchn_fifo_control_block { uint32_t ready; uint32_t _rsvd; uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; }; typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t; #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/features.h000066400000000000000000000072761314037446600323610ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * `incontents 200 elfnotes_features XEN_ELFNOTE_FEATURES * * The list of all the features the guest supports. They are set by * parsing the XEN_ELFNOTE_FEATURES and XEN_ELFNOTE_SUPPORTED_FEATURES * string. The format is the feature names (as given here without the * "XENFEAT_" prefix) separated by '|' characters. * If a feature is required for the kernel to function then the feature name * must be preceded by a '!' character. 
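 * (Illustrative aside: such a string looks like, e.g.,
 *      "writable_page_tables|!auto_translated_physmap"
 *  which advertises two features and marks the second one as required for
 *  the kernel to boot; the names match the XENFEAT_* definitions below with
 *  the prefix dropped.)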
* * Note that if XEN_ELFNOTE_SUPPORTED_FEATURES is used, then in the * XENFEAT_dom0 MUST be set if the guest is to be booted as dom0, */ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */ #define XENFEAT_highmem_assist 6 /* * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel * available pte bits. */ #define XENFEAT_gnttab_map_avail_bits 7 /* x86: Does this Xen host support the HVM callback vector type? */ #define XENFEAT_hvm_callback_vector 8 /* x86: pvclock algorithm is safe to use on HVM */ #define XENFEAT_hvm_safe_pvclock 9 /* x86: pirq can be used by HVM guests */ #define XENFEAT_hvm_pirqs 10 /* operation as Dom0 is supported */ #define XENFEAT_dom0 11 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/gcov.h000066400000000000000000000064441314037446600314750ustar00rootroot00000000000000/****************************************************************************** * gcov.h * * Coverage structures exported by Xen. * Structure is different from Gcc one. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2013, Citrix Systems R&D Ltd. 
*/ #ifndef __XEN_PUBLIC_GCOV_H__ #define __XEN_PUBLIC_GCOV_H__ __XEN_PUBLIC_GCOV_H__ #define XENCOV_COUNTERS 5 #define XENCOV_TAG_BASE 0x58544300u #define XENCOV_TAG_FILE (XENCOV_TAG_BASE+0x46u) #define XENCOV_TAG_FUNC (XENCOV_TAG_BASE+0x66u) #define XENCOV_TAG_COUNTER(n) (XENCOV_TAG_BASE+0x30u+((n)&0xfu)) #define XENCOV_TAG_END (XENCOV_TAG_BASE+0x2eu) #define XENCOV_IS_TAG_COUNTER(n) \ ((n) >= XENCOV_TAG_COUNTER(0) && (n) < XENCOV_TAG_COUNTER(XENCOV_COUNTERS)) #define XENCOV_COUNTER_NUM(n) ((n)-XENCOV_TAG_COUNTER(0)) /* * The main structure for the blob is * BLOB := FILE.. END * FILE := TAG_FILE VERSION STAMP FILENAME COUNTERS FUNCTIONS * FILENAME := LEN characters * characters are padded to 32 bit * LEN := 32 bit value * COUNTERS := TAG_COUNTER(n) NUM COUNTER.. * NUM := 32 bit valie * COUNTER := 64 bit value * FUNCTIONS := TAG_FUNC NUM FUNCTION.. * FUNCTION := IDENT CHECKSUM NUM_COUNTERS * * All tagged structures are aligned to 8 bytes */ /** * File information * Prefixed with XENCOV_TAG_FILE and a string with filename * Aligned to 8 bytes */ struct xencov_file { uint32_t tag; /* XENCOV_TAG_FILE */ uint32_t version; uint32_t stamp; uint32_t fn_len; char filename[1]; }; /** * Counters information * Prefixed with XENCOV_TAG_COUNTER(n) where n is 0..(XENCOV_COUNTERS-1) * Aligned to 8 bytes */ struct xencov_counter { uint32_t tag; /* XENCOV_TAG_COUNTER(n) */ uint32_t num; uint64_t values[1]; }; /** * Information for each function * Number of counter is equal to the number of counter structures got before */ struct xencov_function { uint32_t ident; uint32_t checksum; uint32_t num_counters[1]; }; /** * Information for all functions * Aligned to 8 bytes */ struct xencov_functions { uint32_t tag; /* XENCOV_TAG_FUNC */ uint32_t num; struct xencov_function xencov_function[1]; }; /** * Terminator */ struct xencov_end { uint32_t tag; /* XENCOV_TAG_END */ }; #endif /* __XEN_PUBLIC_GCOV_H__ */ grant_table.h000066400000000000000000000610771314037446600327450ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include /* * `incontents 150 gnttab Grant Tables * * Xen's grant tables provide a generic mechanism to memory sharing * between domains. This shared memory interface underpins the split * device drivers for block and network IO. * * Each domain has its own grant table. This is a data structure that * is shared with Xen; it allows the domain to tell Xen what kind of * permissions other domains have on its pages. Entries in the grant * table are identified by grant references. A grant reference is an * integer, which indexes into the grant table. It acts as a * capability which the grantee can use to perform operations on the * granter’s memory. * * This capability-based system allows shared-memory communications * between unprivileged domains. A grant reference also encapsulates * the details of a shared page, removing the need for a domain to * know the real machine address of a page it is sharing. This makes * it possible to share memory correctly with domains running in * fully virtualised memory. */ /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * * reference implementation for guest OSes (drivers/xen/grant_table.c, see * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. 
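 */

/*
 * A minimal sketch of the "Introducing a valid entry" sequence above for a
 * version 1 grant table, assuming the guest has the shared table mapped and
 * its kernel provides a wmb() write barrier.  grant_ref_t, grant_entry_v1_t
 * and the GTF_* flags are defined later in this header; the helper name is
 * illustrative only.
 */
static void example_grant_access(grant_entry_v1_t *shared, grant_ref_t ref,
                                 domid_t domid, uint32_t frame, int readonly)
{
    shared[ref].domid = domid;                          /* 1. ent->domid */
    shared[ref].frame = frame;                          /* 2. ent->frame */
    wmb();                                              /* 3. barrier    */
    shared[ref].flags = GTF_permit_access |             /* 4. ent->flags */
                        (readonly ? GTF_readonly : 0);
}

/*
 * Revocation then follows the "Invalidating an unused GTF_permit_access
 * entry" steps above, using a compare-and-swap on ent->flags.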
*/ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ /* * Version 1 of the grant table entry structure is maintained purely * for backwards compatibility. New guests should use version 2. */ #if defined(CONFIG_PARAVIRT_XEN) #define grant_entry grant_entry_v1 #elif __XEN_INTERFACE_VERSION__ < 0x0003020a #define grant_entry_v1 grant_entry #define grant_entry_v1_t grant_entry_t #endif struct grant_entry_v1 { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; typedef struct grant_entry_v1 grant_entry_v1_t; /* The first few grant table entries will be preserved across grant table * version changes and may be pre-populated at domain creation by tools. */ #define GNTTAB_NR_RESERVED_ENTRIES 8 #define GNTTAB_RESERVED_CONSOLE 0 #define GNTTAB_RESERVED_XENSTORE 1 /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. Xen writes the page number to @frame. * GTF_transitive: Allow @domid to transitively access a subrange of * @trans_grant in @trans_domid. No mappings are allowed. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_transitive (3U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] * GTF_sub_page: Grant access to only a subrange of the page. @domid * will only be allowed to copy from the grant, and not * map it. [GST] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) #define _GTF_PWT (5) #define GTF_PWT (1U<<_GTF_PWT) #define _GTF_PCD (6) #define GTF_PCD (1U<<_GTF_PCD) #define _GTF_PAT (7) #define GTF_PAT (1U<<_GTF_PAT) #define _GTF_sub_page (8) #define GTF_sub_page (1U<<_GTF_sub_page) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /* * Version 2 grant table entries. 
These fulfil the same role as * version 1 entries, but can represent more complicated operations. * Any given domain will have either a version 1 or a version 2 table, * and every entry in the table will be the same version. * * The interface by which domains use grant references does not depend * on the grant table version in use by the other domain. */ #if defined(CONFIG_PARAVIRT_XEN) || __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * Version 1 and version 2 grant entries share a common prefix. The * fields of the prefix are documented as part of struct * grant_entry_v1. */ struct grant_entry_header { uint16_t flags; domid_t domid; }; typedef struct grant_entry_header grant_entry_header_t; /* * Version 2 of the grant entry structure, here is a union because three * different types are supported: full_page, sub_page and transitive. */ union grant_entry_v2 { struct grant_entry_header hdr; /* * This member is used for V1-style full page grants, where either: * * -- hdr.type is GTF_accept_transfer, or * -- hdr.type is GTF_permit_access and GTF_sub_page is not set. * * In that case, the frame field has the same semantics as the * field of the same name in the V1 entry structure. */ struct { struct grant_entry_header hdr; uint32_t pad0; uint64_t frame; } full_page; /* * If the grant type is GTF_grant_access and GTF_sub_page is set, * @domid is allowed to access bytes [@page_off,@page_off+@length) * in frame @frame. */ struct { struct grant_entry_header hdr; uint16_t page_off; uint16_t length; uint64_t frame; } sub_page; /* * If the grant is GTF_transitive, @domid is allowed to use the * grant @gref in domain @trans_domid, as if it was the local * domain. Obviously, the transitive access must be compatible * with the original grant. * * The current version of Xen does not allow transitive grants * to be mapped. */ struct { struct grant_entry_header hdr; domid_t trans_domid; uint16_t pad0; grant_ref_t gref; } transitive; uint32_t __spacer[4]; /* Pad to a power of two */ }; typedef union grant_entry_v2 grant_entry_v2_t; typedef uint16_t grant_status_t; #endif /* __XEN_INTERFACE_VERSION__ */ /*********************************** * GRANT TABLE QUERIES AND USES */ /* ` enum neg_errnoval * ` HYPERVISOR_grant_table_op(enum grant_table_op cmd, * ` void *args, * ` unsigned int count) * ` * * @args points to an array of a per-command data structure. The array * has @count members */ /* ` enum grant_table_op { // GNTTABOP_* => struct gnttab_* */ #define GNTTABOP_map_grant_ref 0 #define GNTTABOP_unmap_grant_ref 1 #define GNTTABOP_setup_table 2 #define GNTTABOP_dump_table 3 #define GNTTABOP_transfer 4 #define GNTTABOP_copy 5 #define GNTTABOP_query_size 6 #define GNTTABOP_unmap_and_replace 7 #if defined(CONFIG_PARAVIRT_XEN) || __XEN_INTERFACE_VERSION__ >= 0x0003020a #define GNTTABOP_set_version 8 #define GNTTABOP_get_status_frames 9 #define GNTTABOP_get_version 10 #define GNTTABOP_swap_grant_ref 11 #endif /* __XEN_INTERFACE_VERSION__ */ /* ` } */ /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. 
If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* => enum grant_status */ grant_handle_t handle; uint64_t dev_bus_addr; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref); typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* => enum grant_status */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref); typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . * Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* => enum grant_status */ #if !defined(CONFIG_PARAVIRT_XEN) && __XEN_INTERFACE_VERSION__ < 0x00040300 XEN_GUEST_HANDLE(ulong) frame_list; #else XEN_GUEST_HANDLE(xen_pfn_t) frame_list; #endif }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table); typedef struct gnttab_setup_table gnttab_setup_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* => enum grant_status */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table); typedef struct gnttab_dump_table gnttab_dump_table_t; DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ struct gnttab_transfer { /* IN parameters. */ xen_pfn_t mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. 
*/ int16_t status; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer); typedef struct gnttab_transfer gnttab_transfer_t; DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; xen_pfn_t gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; }; typedef struct gnttab_copy gnttab_copy_t; DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy); DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* => enum grant_status */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size); typedef struct gnttab_query_size gnttab_query_size_t; DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t); /* * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings * tracked by but atomically replace the page table entry with one * pointing to the machine address under . will be * redirected to the null entry. * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 2. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ struct gnttab_unmap_and_replace { /* IN parameters. */ uint64_t host_addr; uint64_t new_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* => enum grant_status */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace); typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t; DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t); #if defined(CONFIG_PARAVIRT_XEN) || __XEN_INTERFACE_VERSION__ >= 0x0003020a /* * GNTTABOP_set_version: Request a particular version of the grant * table shared table structure. This operation can only be performed * once in any given domain. It must be performed before any grants * are activated; otherwise, the domain will be stuck with version 1. * The only defined versions are 1 and 2. */ struct gnttab_set_version { /* IN/OUT parameters */ uint32_t version; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version); typedef struct gnttab_set_version gnttab_set_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_set_version_t); /* * GNTTABOP_get_status_frames: Get the list of frames used to store grant * status for . 
In grant format version 2, the status is separated * from the other shared grant fields to allow more efficient synchronization * using barriers instead of atomic cmpexch operations. * specify the size of vector . * The frame addresses are returned in the . * Only addresses are returned, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ struct gnttab_get_status_frames { /* IN parameters. */ uint32_t nr_frames; domid_t dom; /* OUT parameters. */ int16_t status; /* => enum grant_status */ XEN_GUEST_HANDLE(uint64_t) frame_list; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_status_frames); typedef struct gnttab_get_status_frames gnttab_get_status_frames_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_status_frames_t); /* * GNTTABOP_get_version: Get the grant table version which is in * effect for domain . */ struct gnttab_get_version { /* IN parameters */ domid_t dom; uint16_t pad; /* OUT parameters */ uint32_t version; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version); typedef struct gnttab_get_version gnttab_get_version_t; DEFINE_XEN_GUEST_HANDLE(gnttab_get_version_t); /* * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries. */ struct gnttab_swap_grant_ref { /* IN parameters */ grant_ref_t ref_a; grant_ref_t ref_b; /* OUT parameters */ int16_t status; /* => enum grant_status */ }; typedef struct gnttab_swap_grant_ref gnttab_swap_grant_ref_t; DEFINE_XEN_GUEST_HANDLE(gnttab_swap_grant_ref_t); #endif /* __XEN_INTERFACE_VERSION__ */ /* * Bitfield values for gnttab_map_grant_ref.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. */ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) #define _GNTMAP_can_fail (5) #define GNTMAP_can_fail (1<<_GNTMAP_can_fail) /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). */ #define _GNTMAP_guest_avail0 (16) #define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0) /* * Values for error status returns. All errors are -ve. */ /* ` enum grant_status { */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. 
*/ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTST_eagain (-12) /* Operation not done; try again. */ /* ` } */ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary", \ "page address size too large", \ "operation not done; try again" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/000077500000000000000000000000001314037446600311505ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/e820.h000066400000000000000000000030061314037446600317760ustar00rootroot00000000000000 /* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_E820_H__ #define __XEN_PUBLIC_HVM_E820_H__ /* E820 location in HVM virtual address space. */ #define HVM_E820_PAGE 0x00090000 #define HVM_E820_NR_OFFSET 0x000001E8 #define HVM_E820_OFFSET 0x000002D0 #define HVM_BELOW_4G_RAM_END 0xF0000000 #define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END #define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START) #endif /* __XEN_PUBLIC_HVM_E820_H__ */ hvm_info_table.h000066400000000000000000000051531314037446600342220ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/****************************************************************************** * hvm/hvm_info_table.h * * HVM parameter and information table, written into guest memory map. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ #define HVM_INFO_PFN 0x09F #define HVM_INFO_OFFSET 0x800 #define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET) /* Maximum we can support with current vLAPIC ID mapping. */ #define HVM_MAX_VCPUS 128 struct hvm_info_table { char signature[8]; /* "HVM INFO" */ uint32_t length; uint8_t checksum; /* Should firmware build APIC descriptors (APIC MADT / MP BIOS)? */ uint8_t apic_mode; /* How many CPUs does this domain have? */ uint32_t nr_vcpus; /* * MEMORY MAP provided by HVM domain builder. * Notes: * 1. page_to_phys(x) = x << 12 * 2. If a field is zero, the corresponding range does not exist. */ /* * 0x0 to page_to_phys(low_mem_pgend)-1: * RAM below 4GB (except for VGA hole 0xA0000-0xBFFFF) */ uint32_t low_mem_pgend; /* * page_to_phys(reserved_mem_pgstart) to 0xFFFFFFFF: * Reserved for special memory mappings */ uint32_t reserved_mem_pgstart; /* * 0x100000000 to page_to_phys(high_mem_pgend)-1: * RAM above 4GB */ uint32_t high_mem_pgend; /* Bitmap of which CPUs are online at boot time. */ uint8_t vcpu_online[(HVM_MAX_VCPUS + 7)/8]; }; #endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */ hvm_op.h000066400000000000000000000231761314037446600325430ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ #include "../xen.h" #include "../trace.h" /* Get/set subcommands: the second argument of the hypercall is a * pointer to a xen_hvm_param struct. */ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param); typedef struct xen_hvm_param xen_hvm_param_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t); /* Set the logical level of one of a domain's PCI INTx wires. 
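 */

/*
 * A minimal sketch of fetching an HVM parameter from inside the guest,
 * assuming the HYPERVISOR_hvm_op() hypercall wrapper is available and that
 * DOMID_SELF comes from xen.h.  The HVM_PARAM_* indexes themselves live in
 * params.h; the helper name is illustrative only.
 */
static int example_get_hvm_param(uint32_t index, uint64_t *value)
{
    struct xen_hvm_param xhv;
    int rc;

    xhv.domid = DOMID_SELF;
    xhv.index = index;
    rc = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
    if (rc == 0)
        *value = xhv.value;
    return rc;
}

/*
 * A PV-on-HVM driver would use this, for instance, to look up the xenstore
 * event channel via HVM_PARAM_STORE_EVTCHN before bringing up xenbus.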
*/ #define HVMOP_set_pci_intx_level 2 struct xen_hvm_set_pci_intx_level { /* Domain to be updated. */ domid_t domid; /* PCI INTx identification in PCI topology (domain:bus:device:intx). */ uint8_t domain, bus, device, intx; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t); /* Set the logical level of one of a domain's ISA IRQ wires. */ #define HVMOP_set_isa_irq_level 3 struct xen_hvm_set_isa_irq_level { /* Domain to be updated. */ domid_t domid; /* ISA device identification, by ISA IRQ (0-15). */ uint8_t isa_irq; /* Assertion level (0 = unasserted, 1 = asserted). */ uint8_t level; }; typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t); #define HVMOP_set_pci_link_route 4 struct xen_hvm_set_pci_link_route { /* Domain to be updated. */ domid_t domid; /* PCI link identifier (0-3). */ uint8_t link; /* ISA IRQ (1-15), or 0 (disable link). */ uint8_t isa_irq; }; typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t); /* Flushes all VCPU TLBs: @arg must be NULL. */ #define HVMOP_flush_tlbs 5 typedef enum { HVMMEM_ram_rw, /* Normal read/write guest RAM */ HVMMEM_ram_ro, /* Read-only; writes are discarded */ HVMMEM_mmio_dm, /* Reads and write go to the device model */ } hvmmem_type_t; /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Track dirty VRAM. */ #define HVMOP_track_dirty_vram 6 struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; /* First pfn to track. */ uint64_aligned_t first_pfn; /* Number of pages to track. */ uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; }; typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); /* Notify that some pages got modified by the Device Model. */ #define HVMOP_modified_memory 7 struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; /* First pfn. */ uint64_aligned_t first_pfn; /* Number of pages. */ uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); #define HVMOP_set_mem_type 8 /* Notify that a region of memory is to be treated in a specific way. */ struct xen_hvm_set_mem_type { /* Domain to be updated. */ domid_t domid; /* Memory type */ uint16_t hvmmem_type; /* Number of pages. */ uint32_t nr; /* First pfn. */ uint64_aligned_t first_pfn; }; typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ /* Hint from PV drivers for pagetable destruction. */ #define HVMOP_pagetable_dying 9 struct xen_hvm_pagetable_dying { /* Domain with a pagetable about to be destroyed. */ domid_t domid; uint16_t pad[3]; /* align next field on 8-byte boundary */ /* guest physical address of the toplevel pagetable dying */ uint64_t gpa; }; DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_pagetable_dying); typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t); /* Get the current Xen time, in nanoseconds since system boot. 
*/ #define HVMOP_get_time 10 struct xen_hvm_get_time { uint64_t now; /* OUT */ }; typedef struct xen_hvm_get_time xen_hvm_get_time_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t); #define HVMOP_xentrace 11 struct xen_hvm_xentrace { uint16_t event, extra_bytes; uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)]; }; typedef struct xen_hvm_xentrace xen_hvm_xentrace_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t); /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #define HVMOP_set_mem_access 12 typedef enum { HVMMEM_access_n, HVMMEM_access_r, HVMMEM_access_w, HVMMEM_access_rw, HVMMEM_access_x, HVMMEM_access_rx, HVMMEM_access_wx, HVMMEM_access_rwx, HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically * change to r-w on a write */ HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically * goes to rwx, generating an event without * pausing the vcpu */ HVMMEM_access_default /* Take the domain default */ } hvmmem_access_t; /* Notify that a region of memory is to have specific access types */ struct xen_hvm_set_mem_access { /* Domain to be updated. */ domid_t domid; /* Memory type */ uint16_t hvmmem_access; /* hvm_access_t */ /* Number of pages, ignored on setting default access */ uint32_t nr; /* First pfn, or ~0ull to set the default access for new pages */ uint64_aligned_t first_pfn; }; typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t); #define HVMOP_get_mem_access 13 /* Get the specific access type for that region of memory */ struct xen_hvm_get_mem_access { /* Domain to be queried. */ domid_t domid; /* Memory type: OUT */ uint16_t hvmmem_access; /* hvm_access_t */ /* pfn, or ~0ull for default access for new pages. IN */ uint64_aligned_t pfn; }; typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t); #define HVMOP_inject_trap 14 /* Inject a trap into a VCPU, which will get taken up on the next * scheduling of it. Note that the caller should know enough of the * state of the CPU before injecting, to know what the effect of * injecting the trap will be. */ struct xen_hvm_inject_trap { /* Domain to be queried. */ domid_t domid; /* VCPU */ uint32_t vcpuid; /* Vector number */ uint32_t vector; /* Trap type (HVMOP_TRAP_*) */ uint32_t type; /* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */ # define HVMOP_TRAP_ext_int 0 /* external interrupt */ # define HVMOP_TRAP_nmi 2 /* nmi */ # define HVMOP_TRAP_hw_exc 3 /* hardware exception */ # define HVMOP_TRAP_sw_int 4 /* software interrupt (CD nn) */ # define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */ # define HVMOP_TRAP_sw_exc 6 /* INT3 (CC), INTO (CE) */ /* Error code, or ~0u to skip */ uint32_t error_code; /* Intruction length */ uint32_t insn_len; /* CR2 for page faults */ uint64_aligned_t cr2; }; typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #define HVMOP_get_mem_type 15 /* Return hvmmem_type_t for the specified pfn. */ struct xen_hvm_get_mem_type { /* Domain to be queried. */ domid_t domid; /* OUT variable. */ uint16_t mem_type; uint16_t pad[2]; /* align next field on 8-byte boundary */ /* IN variable. */ uint64_t pfn; }; typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t); /* Following tools-only interfaces may change in future. 
*/ #if defined(__XEN__) || defined(__XEN_TOOLS__) /* MSI injection for emulated devices */ #define HVMOP_inject_msi 16 struct xen_hvm_inject_msi { /* Domain to be injected */ domid_t domid; /* Data -- lower 32 bits */ uint32_t data; /* Address (0xfeexxxxx) */ uint64_t addr; }; typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ hvm_xs_strings.h000066400000000000000000000077751314037446600343370ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/****************************************************************************** * hvm/hvm_xs_strings.h * * HVM xenstore strings used in HVMLOADER. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__ #define __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__ #define HVM_XS_HVMLOADER "hvmloader" #define HVM_XS_BIOS "hvmloader/bios" #define HVM_XS_GENERATION_ID_ADDRESS "hvmloader/generation-id-address" #define HVM_XS_ALLOW_MEMORY_RELOCATE "hvmloader/allow-memory-relocate" /* The following values allow additional ACPI tables to be added to the * virtual ACPI BIOS that hvmloader constructs. The values specify the guest * physical address and length of a block of ACPI tables to add. The format of * the block is simply concatenated raw tables (which specify their own length * in the ACPI header). */ #define HVM_XS_ACPI_PT_ADDRESS "hvmloader/acpi/address" #define HVM_XS_ACPI_PT_LENGTH "hvmloader/acpi/length" /* Any number of SMBIOS types can be passed through to an HVM guest using * the following xenstore values. The values specify the guest physical * address and length of a block of SMBIOS structures for hvmloader to use. * The block is formatted in the following way: * * ... * * Each length separator is a 32b integer indicating the length of the next * SMBIOS structure. For DMTF defined types (0 - 121), the passed in struct * will replace the default structure in hvmloader. In addition, any * OEM/vendortypes (128 - 255) will all be added. */ #define HVM_XS_SMBIOS_PT_ADDRESS "hvmloader/smbios/address" #define HVM_XS_SMBIOS_PT_LENGTH "hvmloader/smbios/length" /* Set to 1 to enable SMBIOS default portable battery (type 22) values. 
*/ #define HVM_XS_SMBIOS_DEFAULT_BATTERY "hvmloader/smbios/default_battery" /* The following xenstore values are used to override some of the default * string values in the SMBIOS table constructed in hvmloader. */ #define HVM_XS_BIOS_STRINGS "bios-strings" #define HVM_XS_BIOS_VENDOR "bios-strings/bios-vendor" #define HVM_XS_BIOS_VERSION "bios-strings/bios-version" #define HVM_XS_SYSTEM_MANUFACTURER "bios-strings/system-manufacturer" #define HVM_XS_SYSTEM_PRODUCT_NAME "bios-strings/system-product-name" #define HVM_XS_SYSTEM_VERSION "bios-strings/system-version" #define HVM_XS_SYSTEM_SERIAL_NUMBER "bios-strings/system-serial-number" #define HVM_XS_ENCLOSURE_MANUFACTURER "bios-strings/enclosure-manufacturer" #define HVM_XS_ENCLOSURE_SERIAL_NUMBER "bios-strings/enclosure-serial-number" #define HVM_XS_BATTERY_MANUFACTURER "bios-strings/battery-manufacturer" #define HVM_XS_BATTERY_DEVICE_NAME "bios-strings/battery-device-name" /* 1 to 99 OEM strings can be set in xenstore using values of the form * below. These strings will be loaded into the SMBIOS type 11 structure. */ #define HVM_XS_OEM_STRINGS "bios-strings/oem-%d" #endif /* __XEN_PUBLIC_HVM_HVM_XS_STRINGS_H__ */ ioreq.h000066400000000000000000000116151314037446600323650ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/* * ioreq.h: I/O request definitions for device models * Copyright (c) 2004, Intel Corporation. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _IOREQ_H_ #define _IOREQ_H_ #define IOREQ_READ 1 #define IOREQ_WRITE 0 #define STATE_IOREQ_NONE 0 #define STATE_IOREQ_READY 1 #define STATE_IOREQ_INPROCESS 2 #define STATE_IORESP_READY 3 #define IOREQ_TYPE_PIO 0 /* pio */ #define IOREQ_TYPE_COPY 1 /* mmio ops */ #define IOREQ_TYPE_TIMEOFFSET 7 #define IOREQ_TYPE_INVALIDATE 8 /* mapcache */ /* * VMExit dispatcher should cooperate with instruction decoder to * prepare this structure and notify service OS and DM by sending * virq */ struct ioreq { uint64_t addr; /* physical address */ uint64_t data; /* data (or paddr of data) */ uint32_t count; /* for rep prefixes */ uint32_t size; /* size in bytes */ uint32_t vp_eport; /* evtchn for notifications to/from device model */ uint16_t _pad0; uint8_t state:4; uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr * of the real data to use. 
*/ uint8_t dir:1; /* 1=read, 0=write */ uint8_t df:1; uint8_t _pad1:1; uint8_t type; /* I/O type */ }; typedef struct ioreq ioreq_t; struct shared_iopage { struct ioreq vcpu_ioreq[1]; }; typedef struct shared_iopage shared_iopage_t; struct buf_ioreq { uint8_t type; /* I/O type */ uint8_t pad:1; uint8_t dir:1; /* 1=read, 0=write */ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */ uint32_t addr:20;/* physical address */ uint32_t data; /* data */ }; typedef struct buf_ioreq buf_ioreq_t; #define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */ struct buffered_iopage { unsigned int read_pointer; unsigned int write_pointer; buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM]; }; /* NB. Size of this structure must be no greater than one page. */ typedef struct buffered_iopage buffered_iopage_t; #if defined(__ia64__) struct pio_buffer { uint32_t page_offset; uint32_t pointer; uint32_t data_end; uint32_t buf_size; void *opaque; }; #define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */ #define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */ #define PIO_BUFFER_ENTRY_NUM 2 struct buffered_piopage { struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM]; uint8_t buffer[1]; }; #endif /* defined(__ia64__) */ /* * ACPI Control/Event register locations. Location is controlled by a * version number in HVM_PARAM_ACPI_IOPORTS_LOCATION. */ /* Version 0 (default): Traditional Xen locations. */ #define ACPI_PM1A_EVT_BLK_ADDRESS_V0 0x1f40 #define ACPI_PM1A_CNT_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 0x08) #define ACPI_GPE0_BLK_ADDRESS_V0 (ACPI_PM_TMR_BLK_ADDRESS_V0 + 0x20) #define ACPI_GPE0_BLK_LEN_V0 0x08 /* Version 1: Locations preferred by modern Qemu. */ #define ACPI_PM1A_EVT_BLK_ADDRESS_V1 0xb000 #define ACPI_PM1A_CNT_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x04) #define ACPI_PM_TMR_BLK_ADDRESS_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 0x08) #define ACPI_GPE0_BLK_ADDRESS_V1 0xafe0 #define ACPI_GPE0_BLK_LEN_V1 0x04 /* Compatibility definitions for the default location (version 0). */ #define ACPI_PM1A_EVT_BLK_ADDRESS ACPI_PM1A_EVT_BLK_ADDRESS_V0 #define ACPI_PM1A_CNT_BLK_ADDRESS ACPI_PM1A_CNT_BLK_ADDRESS_V0 #define ACPI_PM_TMR_BLK_ADDRESS ACPI_PM_TMR_BLK_ADDRESS_V0 #define ACPI_GPE0_BLK_ADDRESS ACPI_GPE0_BLK_ADDRESS_V0 #define ACPI_GPE0_BLK_LEN ACPI_GPE0_BLK_LEN_V0 #endif /* _IOREQ_H_ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ params.h000066400000000000000000000131531314037446600325300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * val[63:56] == 2: val[7:0] is a vector number, check for * XENFEAT_hvm_callback_vector to know if this delivery * method is available. * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 /* * These are not used by Xen. They are here for convenience of HVM-guest * xenbus implementations. */ #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 #define HVM_PARAM_BUFIOREQ_EVTCHN 26 #ifdef __ia64__ #define HVM_PARAM_NVRAM_FD 7 #define HVM_PARAM_VHPT_SIZE 8 #define HVM_PARAM_BUFPIOREQ_PFN 9 #elif defined(__i386__) || defined(__x86_64__) /* Expose Viridian interfaces to this HVM guest? */ #define HVM_PARAM_VIRIDIAN 9 #endif /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 /* Console debug shared memory ring and event channel */ #define HVM_PARAM_CONSOLE_PFN 17 #define HVM_PARAM_CONSOLE_EVTCHN 18 /* * Select location of ACPI PM1a and TMR control blocks. 
Currently two locations * are supported, specified by version 0 or 1 in this parameter: * - 0: default, use the old addresses * PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48 * - 1: use the new default qemu addresses * PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008 * You can find these address definitions in */ #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19 /* Enable blocking memory events, async or sync (pause vcpu until response) * onchangeonly indicates messages only on a change of value */ #define HVM_PARAM_MEMORY_EVENT_CR0 20 #define HVM_PARAM_MEMORY_EVENT_CR3 21 #define HVM_PARAM_MEMORY_EVENT_CR4 22 #define HVM_PARAM_MEMORY_EVENT_INT3 23 #define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25 #define HVM_PARAM_MEMORY_EVENT_MSR 30 #define HVMPME_MODE_MASK (3 << 0) #define HVMPME_mode_disabled 0 #define HVMPME_mode_async 1 #define HVMPME_mode_sync 2 #define HVMPME_onchangeonly (1 << 2) /* Boolean: Enable nestedhvm (hvm only) */ #define HVM_PARAM_NESTEDHVM 24 /* Params for the mem event rings */ #define HVM_PARAM_PAGING_RING_PFN 27 #define HVM_PARAM_ACCESS_RING_PFN 28 #define HVM_PARAM_SHARING_RING_PFN 29 /* SHUTDOWN_* action in case of a triple fault */ #define HVM_PARAM_TRIPLE_FAULT_REASON 31 #define HVM_NR_PARAMS 32 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ pvdrivers.h000066400000000000000000000043411314037446600332700ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/* * pvdrivers.h: Register of PV drivers product numbers. * Copyright (c) 2012, Citrix Systems Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef _XEN_PUBLIC_PVDRIVERS_H_ #define _XEN_PUBLIC_PVDRIVERS_H_ /* * This is the master registry of product numbers for * PV drivers. * If you need a new product number allocating, please * post to xen-devel@lists.xensource.com. You should NOT use * a product number without allocating one. * If you maintain a separate versioning and distribution path * for PV drivers you should have a separate product number so * that your drivers can be separated from others. * * During development, you may use the product ID to * indicate a driver which is yet to be released. 
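 */

/*
 * A minimal sketch of consuming the PVDRIVERS_PRODUCT_LIST() X-macro defined
 * just below, e.g. to build a name/ID lookup table.  The struct and macro
 * names here are illustrative only.
 */
struct example_pvdriver_product {
    const char *name;
    uint16_t    id;
};

#define EXAMPLE_PVDRIVER_ENTRY(name, id) { name, id },
static const struct example_pvdriver_product example_pvdriver_products[] = {
    PVDRIVERS_PRODUCT_LIST(EXAMPLE_PVDRIVER_ENTRY)
};
#undef EXAMPLE_PVDRIVER_ENTRY

/*
 * Generating the table from the list keeps new product numbers in a single
 * place instead of being duplicated in every consumer.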
*/ #define PVDRIVERS_PRODUCT_LIST(EACH) \ EACH("xensource-windows", 0x0001) /* Citrix */ \ EACH("gplpv-windows", 0x0002) /* James Harper */ \ EACH("linux", 0x0003) \ EACH("xenserver-windows-v7.0+", 0x0004) /* Citrix */ \ EACH("xenserver-windows-v7.2+", 0x0005) /* Citrix */ \ EACH("experimental", 0xffff) #endif /* _XEN_PUBLIC_PVDRIVERS_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/hvm/save.h000066400000000000000000000107301314037446600322600ustar00rootroot00000000000000/* * hvm/save.h * * Structure definitions for HVM state that is held by Xen and must * be saved along with the domain's memory and device-model state. * * Copyright (c) 2007 XenSource Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_SAVE_H__ #define __XEN_PUBLIC_HVM_SAVE_H__ /* * Structures in this header *must* have the same layout in 32bit * and 64bit environments: this means that all fields must be explicitly * sized types and aligned to their sizes, and the structs must be * a multiple of eight bytes long. * * Only the state necessary for saving and restoring (i.e. fields * that are analogous to actual hardware state) should go in this file. * Internal mechanisms should be kept in Xen-private headers. */ #if !defined(__GNUC__) || defined(__STRICT_ANSI__) #error "Anonymous structs/unions are a GNU extension." #endif /* * Each entry is preceded by a descriptor giving its type and length */ struct hvm_save_descriptor { uint16_t typecode; /* Used to demux the various types below */ uint16_t instance; /* Further demux within a type */ uint32_t length; /* In bytes, *not* including this descriptor */ }; /* * Each entry has a datatype associated with it: for example, the CPU state * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU), * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU). * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system * ugliness. 
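 */

/*
 * A minimal sketch of how a save record type is declared with the macros
 * below and how its code and length are then recovered.  The record shown
 * here is purely illustrative and does not correspond to a real Xen save
 * type; with the non-__XEN__ definitions, HVM_SAVE_CODE(EXAMPLE) evaluates
 * to 99 and HVM_SAVE_LENGTH(EXAMPLE) to sizeof(struct hvm_hw_example).
 */
struct hvm_hw_example {
    uint64_t reg;
};
DECLARE_HVM_SAVE_TYPE(EXAMPLE, 99, struct hvm_hw_example);

/*
 * With the __XEN__ variants below, the same declaration additionally wires
 * up the compatibility fix-up hooks.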
*/ #ifdef __XEN__ # define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \ static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { return _fix(h); } \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];}; \ struct __HVM_SAVE_TYPE_COMPAT_##_x { _ctype t; } # include /* BUG() */ # define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ static inline int __HVM_SAVE_FIX_COMPAT_##_x(void *h) { BUG(); return -1; } \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];}; \ struct __HVM_SAVE_TYPE_COMPAT_##_x { _type t; } #else # define DECLARE_HVM_SAVE_TYPE_COMPAT(_x, _code, _type, _ctype, _fix) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[2];} # define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];} #endif #define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) #ifdef __XEN__ # define HVM_SAVE_TYPE_COMPAT(_x) typeof (((struct __HVM_SAVE_TYPE_COMPAT_##_x *)(0))->t) # define HVM_SAVE_LENGTH_COMPAT(_x) (sizeof (HVM_SAVE_TYPE_COMPAT(_x))) # define HVM_SAVE_HAS_COMPAT(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->cpt)-1) # define HVM_SAVE_FIX_COMPAT(_x, _dst) __HVM_SAVE_FIX_COMPAT_##_x(_dst) #endif /* * The series of save records is teminated by a zero-type, zero-length * descriptor. */ struct hvm_save_end {}; DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end); #if defined(__i386__) || defined(__x86_64__) #include "../arch-x86/hvm/save.h" #elif defined(__ia64__) #include "../arch-ia64/hvm/save.h" #elif defined(__arm__) || defined(__aarch64__) #include "../arch-arm/hvm/save.h" #else #error "unsupported architecture" #endif #endif /* __XEN_PUBLIC_HVM_SAVE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/000077500000000000000000000000001314037446600307655ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/blkif.h000066400000000000000000000676411314037446600322430ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2003-2004, Keir Fraser * Copyright (c) 2012, Spectra Logic Corporation */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ #ifndef blkif_vdev_t #define blkif_vdev_t uint16_t #endif #define blkif_sector_t uint64_t /* * Feature and Parameter Negotiation * ================================= * The two halves of a Xen block driver utilize nodes within the XenStore to * communicate capabilities and to negotiate operating parameters. This * section enumerates these nodes which reside in the respective front and * backend portions of the XenStore, following the XenBus convention. * * All data in the XenStore is stored as strings. Nodes specifying numeric * values are encoded in decimal. Integer value ranges listed below are * expressed as fixed sized integer types capable of storing the conversion * of a properly formated node string, without loss of information. * * Any specified default value is in effect if the corresponding XenBus node * is not present in the XenStore. * * XenStore nodes in sections marked "PRIVATE" are solely for use by the * driver side whose XenBus tree contains them. * * XenStore nodes marked "DEPRECATED" in their notes section should only be * used to provide interoperability with legacy implementations. * * See the XenBus state transition diagram below for details on when XenBus * nodes must be published and when they can be queried. * ***************************************************************************** * Backend XenBus Nodes ***************************************************************************** * *------------------ Backend Device Identification (PRIVATE) ------------------ * * mode * Values: "r" (read only), "w" (writable) * * The read or write access permissions to the backing store to be * granted to the frontend. * * params * Values: string * * A free formatted string providing sufficient information for the * backend driver to open the backing device. (e.g. the path to the * file or block device representing the backing store.) * * type * Values: "file", "phy", "tap" * * The type of the backing device/object. * *--------------------------------- Features --------------------------------- * * feature-barrier * Values: 0/1 (boolean) * Default Value: 0 * * A value of "1" indicates that the backend can process requests * containing the BLKIF_OP_WRITE_BARRIER request opcode. Requests * of this type may still be returned at any time with the * BLKIF_RSP_EOPNOTSUPP result code. * * feature-flush-cache * Values: 0/1 (boolean) * Default Value: 0 * * A value of "1" indicates that the backend can process requests * containing the BLKIF_OP_FLUSH_DISKCACHE request opcode. Requests * of this type may still be returned at any time with the * BLKIF_RSP_EOPNOTSUPP result code. 
* * feature-discard * Values: 0/1 (boolean) * Default Value: 0 * * A value of "1" indicates that the backend can process requests * containing the BLKIF_OP_DISCARD request opcode. Requests * of this type may still be returned at any time with the * BLKIF_RSP_EOPNOTSUPP result code. * * feature-persistent * Values: 0/1 (boolean) * Default Value: 0 * Notes: 7 * * A value of "1" indicates that the backend can keep the grants used * by the frontend driver mapped, so the same set of grants should be * used in all transactions. The maximum number of grants the backend * can map persistently depends on the implementation, but ideally it * should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this * feature the backend doesn't need to unmap each grant, preventing * costly TLB flushes. The backend driver should only map grants * persistently if the frontend supports it. If a backend driver chooses * to use the persistent protocol when the frontend doesn't support it, * it will probably hit the maximum number of persistently mapped grants * (due to the fact that the frontend won't be reusing the same grants), * and fall back to non-persistent mode. Backend implementations may * shrink or expand the number of persistently mapped grants without * notifying the frontend depending on memory constraints (this might * cause a performance degradation). * * If a backend driver wants to limit the maximum number of persistently * mapped grants to a value less than RING_SIZE * * BLKIF_MAX_SEGMENTS_PER_REQUEST a LRU strategy should be used to * discard the grants that are less commonly used. Using a LRU in the * backend driver paired with a LIFO queue in the frontend will * allow us to have better performance in this scenario. * *----------------------- Request Transport Parameters ------------------------ * * max-ring-page-order * Values: * Default Value: 0 * Notes: 1, 3 * * The maximum supported size of the request ring buffer in units of * lb(machine pages). (e.g. 0 == 1 page, 1 = 2 pages, 2 == 4 pages, * etc.). * * max-ring-pages * Values: * Default Value: 1 * Notes: DEPRECATED, 2, 3 * * The maximum supported size of the request ring buffer in units of * machine pages. The value must be a power of 2. * *------------------------- Backend Device Properties ------------------------- * * discard-alignment * Values: * Default Value: 0 * Notes: 4, 5 * * The offset, in bytes from the beginning of the virtual block device, * to the first, addressable, discard extent on the underlying device. * * discard-granularity * Values: * Default Value: <"sector-size"> * Notes: 4 * * The size, in bytes, of the individually addressable discard extents * of the underlying device. * * discard-secure * Values: 0/1 (boolean) * Default Value: 0 * Notes: 10 * * A value of "1" indicates that the backend can process BLKIF_OP_DISCARD * requests with the BLKIF_DISCARD_SECURE flag set. * * info * Values: (bitmap) * * A collection of bit flags describing attributes of the backing * device. The VDISK_* macros define the meaning of each bit * location. * * sector-size * Values: * * The logical sector size, in bytes, of the backend device. * * physical-sector-size * Values: * * The physical sector size, in bytes, of the backend device. * * sectors * Values: * * The size of the backend device, expressed in units of its logical * sector size ("sector-size"). 
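 *
 * Editor's example (hypothetical values, not mandated by this interface):
 * a writable, file-backed 10 GiB disk might therefore be advertised as
 *
 *     mode = "w"
 *     type = "file"
 *     params = "/var/lib/xen/images/guest-disk.img"
 *     feature-flush-cache = "1"
 *     sector-size = "512"
 *     sectors = "20971520"
 *
 * giving a usable size of 20971520 * 512 bytes = 10 GiB.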
* ***************************************************************************** * Frontend XenBus Nodes ***************************************************************************** * *----------------------- Request Transport Parameters ----------------------- * * event-channel * Values: * * The identifier of the Xen event channel used to signal activity * in the ring buffer. * * ring-ref * Values: * Notes: 6 * * The Xen grant reference granting permission for the backend to map * the sole page in a single page sized ring buffer. * * ring-ref%u * Values: * Notes: 6 * * For a frontend providing a multi-page ring, a "number of ring pages" * sized list of nodes, each containing a Xen grant reference granting * permission for the backend to map the page of the ring located * at page index "%u". Page indexes are zero based. * * protocol * Values: string (XEN_IO_PROTO_ABI_*) * Default Value: XEN_IO_PROTO_ABI_NATIVE * * The machine ABI rules governing the format of all ring request and * response structures. * * ring-page-order * Values: * Default Value: 0 * Maximum Value: MAX(ffs(max-ring-pages) - 1, max-ring-page-order) * Notes: 1, 3 * * The size of the frontend allocated request ring buffer in units * of lb(machine pages). (e.g. 0 == 1 page, 1 = 2 pages, 2 == 4 pages, * etc.). * * num-ring-pages * Values: * Default Value: 1 * Maximum Value: MAX(max-ring-pages,(0x1 << max-ring-page-order)) * Notes: DEPRECATED, 2, 3 * * The size of the frontend allocated request ring buffer in units of * machine pages. The value must be a power of 2. * * feature-persistent * Values: 0/1 (boolean) * Default Value: 0 * Notes: 7, 8, 9 * * A value of "1" indicates that the frontend will reuse the same grants * for all transactions, allowing the backend to map them with write * access (even when it should be read-only). If the frontend hits the * maximum number of allowed persistently mapped grants, it can fallback * to non persistent mode. This will cause a performance degradation, * since the the backend driver will still try to map those grants * persistently. Since the persistent grants protocol is compatible with * the previous protocol, a frontend driver can choose to work in * persistent mode even when the backend doesn't support it. * * It is recommended that the frontend driver stores the persistently * mapped grants in a LIFO queue, so a subset of all persistently mapped * grants gets used commonly. This is done in case the backend driver * decides to limit the maximum number of persistently mapped grants * to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. * *------------------------- Virtual Device Properties ------------------------- * * device-type * Values: "disk", "cdrom", "floppy", etc. * * virtual-device * Values: * * A value indicating the physical device to virtualize within the * frontend's domain. (e.g. "The first ATA disk", "The third SCSI * disk", etc.) * * See docs/misc/vbd-interface.txt for details on the format of this * value. * * Notes * ----- * (1) Multi-page ring buffer scheme first developed in the Citrix XenServer * PV drivers. * (2) Multi-page ring buffer scheme first used in some RedHat distributions * including a distribution deployed on certain nodes of the Amazon * EC2 cluster. * (3) Support for multi-page ring buffers was implemented independently, * in slightly different forms, by both Citrix and RedHat/Amazon. 
* For full interoperability, block front and backends should publish * identical ring parameters, adjusted for unit differences, to the * XenStore nodes used in both schemes. * (4) Devices that support discard functionality may internally allocate space * (discardable extents) in units that are larger than the exported logical * block size. If the backing device has such discardable extents the * backend should provide both discard-granularity and discard-alignment. * Providing just one of the two may be considered an error by the frontend. * Backends supporting discard should include discard-granularity and * discard-alignment even if it supports discarding individual sectors. * Frontends should assume discard-alignment == 0 and discard-granularity * == sector size if these keys are missing. * (5) The discard-alignment parameter allows a physical device to be * partitioned into virtual devices that do not necessarily begin or * end on a discardable extent boundary. * (6) When there is only a single page allocated to the request ring, * 'ring-ref' is used to communicate the grant reference for this * page to the backend. When using a multi-page ring, the 'ring-ref' * node is not created. Instead 'ring-ref0' - 'ring-refN' are used. * (7) When using persistent grants data has to be copied from/to the page * where the grant is currently mapped. The overhead of doing this copy * however doesn't suppress the speed improvement of not having to unmap * the grants. * (8) The frontend driver has to allow the backend driver to map all grants * with write access, even when they should be mapped read-only, since * further requests may reuse these grants and require write permissions. * (9) Linux implementation doesn't have a limit on the maximum number of * grants that can be persistently mapped in the frontend driver, but * due to the frontent driver implementation it should never be bigger * than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. *(10) The discard-secure property may be present and will be set to 1 if the * backing device supports secure discard. */ /* * STATE DIAGRAMS * ***************************************************************************** * Startup * ***************************************************************************** * * Tool stack creates front and back nodes with state XenbusStateInitialising. * * Front Back * ================================= ===================================== * XenbusStateInitialising XenbusStateInitialising * o Query virtual device o Query backend device identification * properties. data. * o Setup OS device instance. o Open and validate backend device. * o Publish backend features and * transport parameters. * | * | * V * XenbusStateInitWait * * o Query backend features and * transport parameters. * o Allocate and initialize the * request ring. * o Publish transport parameters * that will be in effect during * this connection. * | * | * V * XenbusStateInitialised * * o Query frontend transport parameters. * o Connect to the request ring and * event channel. * o Publish backend device properties. * | * | * V * XenbusStateConnected * * o Query backend device properties. * o Finalize OS virtual device * instance. * | * | * V * XenbusStateConnected * * Note: Drivers that do not support any optional features, or the negotiation * of transport parameters, can skip certain states in the state machine: * * o A frontend may transition to XenbusStateInitialised without * waiting for the backend to enter XenbusStateInitWait. 
In this * case, default transport parameters are in effect and any * transport parameters published by the frontend must contain * their default values. * * o A backend may transition to XenbusStateInitialised, bypassing * XenbusStateInitWait, without waiting for the frontend to first * enter the XenbusStateInitialised state. In this case, default * transport parameters are in effect and any transport parameters * published by the backend must contain their default values. * * Drivers that support optional features and/or transport parameter * negotiation must tolerate these additional state transition paths. * In general this means performing the work of any skipped state * transition, if it has not already been performed, in addition to the * work associated with entry into the current state. */ /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * All writes issued prior to a request with the BLKIF_OP_WRITE_BARRIER * operation code ("barrier request") must be completed prior to the * execution of the barrier request. All writes issued after the barrier * request must not execute until after the completion of the barrier request. * * Optional. See "feature-barrier" XenBus node documentation above. */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Commit any uncommitted contents of the backing device's volatile cache * to stable storage. * * Optional. See "feature-flush-cache" XenBus node documentation above. */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Indicate to the backend device that a region of storage is no longer in * use, and may be discarded at any time without impact to the client. If * the BLKIF_DISCARD_SECURE flag is set on the request, all copies of the * discarded region on the device must be rendered unrecoverable before the * command returns. * * This operation is analogous to performing a trim (ATA) or unamp (SCSI), * command on a native device. * * More information about trim/unmap operations can be found at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * * Optional. See "feature-discard", "discard-alignment", * "discard-granularity", and "discard-secure" in the XenBus node * documentation above. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * These pages are filled with an array of blkif_request_segment that hold the * information about the segments. The number of indirect pages to use is * determined by the number of segments an indirect request contains. 
Every * indirect page can contain a maximum of * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to * calculate the number of indirect pages to use we have to do * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 /* * Maximum number of indirect pages to use per request. */ #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 /* * NB. first_sect and last_sect in blkif_request_segment, as well as * sector_number in blkif_request, are always expressed in 512-byte units. * However they must be properly aligned to the real sector size of the * physical disk, which is reported in the "physical-sector-size" node in * the backend xenbus info. Also the xenbus "sectors" node is expressed in * 512-byte units. */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; }; /* * Starting ring element for any I/O request. */ struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #else union { struct __attribute__((__packed__)) blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifdef CONFIG_X86_64 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } rw; struct __attribute__((__packed__)) blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifdef CONFIG_X86_64 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } discard; struct __attribute__((__packed__)) blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifdef CONFIG_X86_64 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } other; struct __attribute__((__packed__)) blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifdef CONFIG_X86_64 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifdef CONFIG_X86_64 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } indirect; } u; } __attribute__((__packed__)); #endif typedef struct blkif_request blkif_request_t; #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request) */ struct blkif_request_discard { uint8_t operation; /* BLKIF_OP_DISCARD */ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t handle; /* same as for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk */ uint64_t nr_sectors; /* number of contiguous sectors to discard*/ }; typedef struct blkif_request_discard blkif_request_discard_t; struct blkif_request_indirect { uint8_t operation; /* BLKIF_OP_INDIRECT */ uint8_t indirect_op; /* BLKIF_OP_{READ/WRITE} */ uint16_t nr_segments; /* number of segments */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ blkif_vdev_t handle; /* same as for read/write requests */ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifdef __i386__ uint64_t pad; /* Make it 64 byte aligned on i386 */ #endif }; typedef struct blkif_request_indirect blkif_request_indirect_t; #endif struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. 
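 *
 * Editor's note: DEFINE_RING_TYPES() comes from ring.h and, for "blkif",
 * emits struct blkif_sring (the shared-page layout) together with
 * struct blkif_front_ring / struct blkif_back_ring and their typedefs.
 * A common (sketched, Linux-flavoured) frontend setup looks roughly like
 *
 *     struct blkif_sring *sring =
 *             (struct blkif_sring *)get_zeroed_page(GFP_NOIO);
 *     struct blkif_front_ring ring;
 *
 *     SHARED_RING_INIT(sring);
 *     FRONT_RING_INIT(&ring, sring, PAGE_SIZE);
 *     ... grant the page and publish "ring-ref" as described above ...
 *
 * This is a usage sketch only; nothing in this header requires it.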
*/ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 /* Xen-defined major numbers for virtual disks, they look strangely * familiar */ #define XEN_IDE0_MAJOR 3 #define XEN_IDE1_MAJOR 22 #define XEN_SCSI_DISK0_MAJOR 8 #define XEN_SCSI_DISK1_MAJOR 65 #define XEN_SCSI_DISK2_MAJOR 66 #define XEN_SCSI_DISK3_MAJOR 67 #define XEN_SCSI_DISK4_MAJOR 68 #define XEN_SCSI_DISK5_MAJOR 69 #define XEN_SCSI_DISK6_MAJOR 70 #define XEN_SCSI_DISK7_MAJOR 71 #define XEN_SCSI_DISK8_MAJOR 128 #define XEN_SCSI_DISK9_MAJOR 129 #define XEN_SCSI_DISK10_MAJOR 130 #define XEN_SCSI_DISK11_MAJOR 131 #define XEN_SCSI_DISK12_MAJOR 132 #define XEN_SCSI_DISK13_MAJOR 133 #define XEN_SCSI_DISK14_MAJOR 134 #define XEN_SCSI_DISK15_MAJOR 135 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ cdromif.h000066400000000000000000000067321314037446600325120ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/****************************************************************************** * cdromif.h * * Shared definitions between backend driver and Xen guest Virtual CDROM * block device. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef __XEN_PUBLIC_IO_CDROMIF_H__ #define __XEN_PUBLIC_IO_CDROMIF_H__ /* * Queries backend for CDROM support */ #define XEN_TYPE_CDROM_SUPPORT _IO('c', 1) struct xen_cdrom_support { uint32_t type; int8_t ret; /* returned, 0 succeded, -1 error */ int8_t err; /* returned, backend errno */ int8_t supported; /* returned, 1 supported */ }; /* * Opens backend device, returns drive geometry or * any encountered errors */ #define XEN_TYPE_CDROM_OPEN _IO('c', 2) struct xen_cdrom_open { uint32_t type; int8_t ret; int8_t err; int8_t pad; int8_t media_present; /* returned */ uint32_t sectors; /* returned */ uint32_t sector_size; /* returned */ int32_t payload_offset; /* offset to backend node name payload */ }; /* * Queries backend for media changed status */ #define XEN_TYPE_CDROM_MEDIA_CHANGED _IO('c', 3) struct xen_cdrom_media_changed { uint32_t type; int8_t ret; int8_t err; int8_t media_changed; /* returned */ }; /* * Sends vcd generic CDROM packet to backend, followed * immediately by the vcd_generic_command payload */ #define XEN_TYPE_CDROM_PACKET _IO('c', 4) struct xen_cdrom_packet { uint32_t type; int8_t ret; int8_t err; int8_t pad[2]; int32_t payload_offset; /* offset to vcd_generic_command payload */ }; /* CDROM_PACKET_COMMAND, payload for XEN_TYPE_CDROM_PACKET */ struct vcd_generic_command { uint8_t cmd[CDROM_PACKET_SIZE]; uint8_t pad[4]; uint32_t buffer_offset; uint32_t buflen; int32_t stat; uint32_t sense_offset; uint8_t data_direction; uint8_t pad1[3]; int32_t quiet; int32_t timeout; }; union xen_block_packet { uint32_t type; struct xen_cdrom_support xcs; struct xen_cdrom_open xco; struct xen_cdrom_media_changed xcmc; struct xen_cdrom_packet xcp; }; #define PACKET_PAYLOAD_OFFSET (sizeof(struct xen_cdrom_packet)) #define PACKET_SENSE_OFFSET (PACKET_PAYLOAD_OFFSET + sizeof(struct vcd_generic_command)) #define PACKET_BUFFER_OFFSET (PACKET_SENSE_OFFSET + sizeof(struct request_sense)) #define MAX_PACKET_DATA (PAGE_SIZE - sizeof(struct xen_cdrom_packet) - \ sizeof(struct vcd_generic_command) - sizeof(struct request_sense)) #endif console.h000066400000000000000000000031271314037446600325240ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/fbif.h000066400000000000000000000124361314037446600320520ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* start offset within framebuffer */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* * Framebuffer refresh period advice * Backend sends it to advise the frontend their preferred period of * refresh. Frontends that keep the framebuffer constantly up-to-date * just ignore it. Frontends that use the advice should immediately * refresh the framebuffer (and send an update notification event if * those have been requested), then use the update frequency to guide * their periodical refreshs. 
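 *
 * Editor's sketch of how a frontend might honour the advice (the control
 * flow is illustrative only; the type, field and constant names are the
 * ones defined just below):
 *
 *     case XENFB_TYPE_REFRESH_PERIOD:
 *         if (event->refresh_period.period == XENFB_NO_REFRESH)
 *             ... cancel any periodic refresh timer ...
 *         else
 *             ... (re)arm the timer with the given period in ms and
 *                 refresh (and notify, if requested) on each expiry ...
 *         break;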
*/ #define XENFB_TYPE_REFRESH_PERIOD 1 #define XENFB_NO_REFRESH 0 struct xenfb_refresh_period { uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */ uint32_t period; /* period of refresh, in ms, * XENFB_NO_REFRESH if no refresh is needed */ }; #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; struct xenfb_refresh_period refresh_period; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* width of the framebuffer (in pixels) */ int32_t height; /* height of the framebuffer (in pixels) */ uint32_t line_length; /* length of a row of pixels (in bytes) */ uint32_t mem_length; /* length of the framebuffer (in bytes) */ uint8_t depth; /* depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 * Megs 64 bit. 256 directories give enough room for a 512 * Meg framebuffer with a max resolution of 12,800x10,240. * Should be enough for a while with room leftover for * expansion. */ #ifndef CONFIG_PARAVIRT_XEN unsigned long pd[256]; #else /* Two directory pages should be enough for a while. */ unsigned long pd[2]; #endif }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/fsif.h000066400000000000000000000123761314037446600320760ustar00rootroot00000000000000/****************************************************************************** * fsif.h * * Interface to FS level split device drivers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2007, Grzegorz Milos, . */ #ifndef __XEN_PUBLIC_IO_FSIF_H__ #define __XEN_PUBLIC_IO_FSIF_H__ #include "ring.h" #include "../grant_table.h" #define REQ_FILE_OPEN 1 #define REQ_FILE_CLOSE 2 #define REQ_FILE_READ 3 #define REQ_FILE_WRITE 4 #define REQ_STAT 5 #define REQ_FILE_TRUNCATE 6 #define REQ_REMOVE 7 #define REQ_RENAME 8 #define REQ_CREATE 9 #define REQ_DIR_LIST 10 #define REQ_CHMOD 11 #define REQ_FS_SPACE 12 #define REQ_FILE_SYNC 13 struct fsif_open_request { grant_ref_t gref; }; struct fsif_close_request { uint32_t fd; }; struct fsif_read_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_write_request { uint32_t fd; int32_t pad; uint64_t len; uint64_t offset; grant_ref_t grefs[1]; /* Variable length */ }; struct fsif_stat_request { uint32_t fd; }; /* This structure is a copy of some fields from stat structure, returned * via the ring. */ struct fsif_stat_response { int32_t stat_mode; uint32_t stat_uid; uint32_t stat_gid; int32_t stat_ret; int64_t stat_size; int64_t stat_atime; int64_t stat_mtime; int64_t stat_ctime; }; struct fsif_truncate_request { uint32_t fd; int32_t pad; int64_t length; }; struct fsif_remove_request { grant_ref_t gref; }; struct fsif_rename_request { uint16_t old_name_offset; uint16_t new_name_offset; grant_ref_t gref; }; struct fsif_create_request { int8_t directory; int8_t pad; int16_t pad2; int32_t mode; grant_ref_t gref; }; struct fsif_list_request { uint32_t offset; grant_ref_t gref; }; #define NR_FILES_SHIFT 0 #define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */ #define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT) #define ERROR_SIZE 32 /* 32 bits for the error mask */ #define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT) #define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT) #define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE) #define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT) struct fsif_chmod_request { uint32_t fd; int32_t mode; }; struct fsif_space_request { grant_ref_t gref; }; struct fsif_sync_request { uint32_t fd; }; /* FS operation request */ struct fsif_request { uint8_t type; /* Type of the request */ uint8_t pad; uint16_t id; /* Request ID, copied to the response */ uint32_t pad2; union { struct fsif_open_request fopen; struct fsif_close_request fclose; struct fsif_read_request fread; struct fsif_write_request fwrite; struct fsif_stat_request fstat; struct fsif_truncate_request ftruncate; struct fsif_remove_request fremove; struct fsif_rename_request frename; struct fsif_create_request fcreate; struct fsif_list_request flist; struct fsif_chmod_request fchmod; struct fsif_space_request fspace; struct fsif_sync_request fsync; } u; }; typedef struct fsif_request fsif_request_t; /* FS operation response */ struct fsif_response { uint16_t id; uint16_t pad1; uint32_t pad2; union { uint64_t ret_val; struct fsif_stat_response fstat; } u; }; typedef struct fsif_response fsif_response_t; #define FSIF_RING_ENTRY_SIZE 64 #define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \ sizeof(grant_ref_t) + 1) #define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \ sizeof(grant_ref_t) + 1) DEFINE_RING_TYPES(fsif, 
struct fsif_request, struct fsif_response); #define STATE_INITIALISED "init" #define STATE_READY "ready" #define STATE_CLOSING "closing" #define STATE_CLOSED "closed" #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/kbdif.h000066400000000000000000000072171314037446600322240ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. */ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. 
*/ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif libxenvchan.h000066400000000000000000000066131314037446600333660ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/** * @file * @section AUTHORS * * Copyright (C) 2010 Rafal Wojtczuk * * Authors: * Rafal Wojtczuk * Daniel De Graaf * * @section LICENSE * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * @section DESCRIPTION * * Originally borrowed from the Qubes OS Project, http://www.qubes-os.org, * this code has been substantially rewritten to use the gntdev and gntalloc * devices instead of raw MFNs and map_foreign_range. * * This is a library for inter-domain communication. A standard Xen ring * buffer is used, with a datagram-based interface built on top. The grant * reference and event channels are shared in XenStore under a user-specified * path. * * The ring.h macros define an asymmetric interface to a shared data structure * that assumes all rings reside in a single contiguous memory space. This is * not suitable for vchan because the interface to the ring is symmetric except * for the setup. Unlike the producer-consumer rings defined in ring.h, the * size of the rings used in vchan are determined at execution time instead of * compile time, so the macros in ring.h cannot be used to access the rings. */ #include #include struct ring_shared { uint32_t cons, prod; }; #define VCHAN_NOTIFY_WRITE 0x1 #define VCHAN_NOTIFY_READ 0x2 /** * vchan_interface: primary shared data structure */ struct vchan_interface { /** * Standard consumer/producer interface, one pair per buffer * left is client write, server read * right is client read, server write */ struct ring_shared left, right; /** * size of the rings, which determines their location * 10 - at offset 1024 in ring's page * 11 - at offset 2048 in ring's page * 12+ - uses 2^(N-12) grants to describe the multi-page ring * These should remain constant once the page is shared. 
* Only one of the two orders can be 10 (or 11). */ uint16_t left_order, right_order; /** * Shutdown detection: * 0: client (or server) has exited * 1: client (or server) is connected * 2: client has not yet connected */ uint8_t cli_live, srv_live; /** * Notification bits: * VCHAN_NOTIFY_WRITE: send notify when data is written * VCHAN_NOTIFY_READ: send notify when data is read (consumed) * cli_notify is used for the client to inform the server of its action */ uint8_t cli_notify, srv_notify; /** * Grant list: ordering is left, right. Must not extend into actual ring * or grow beyond the end of the initial shared page. * These should remain constant once the page is shared, to allow * for possible remapping by a client that restarts. */ uint32_t grants[0]; }; UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/netif.h000066400000000000000000000226221314037446600322470ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Older implementation of Xen network frontend / backend has an * implicit dependency on the MAX_SKB_FRAGS as the maximum number of * ring slots a skb can use. Netfront / netback may not work as * expected when frontend and backend have different MAX_SKB_FRAGS. * * A better approach is to add mechanism for netfront / netback to * negotiate this value. However we cannot fix all possible * frontends, so we need to define a value which states the minimum * slots backend must support. * * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS * (18), which is proved to work with most frontends. Any new backend * which doesn't negotiate with frontend should expect frontend to * send a valid packet using slots up to this value. */ #define XEN_NETIF_NR_SLOTS_MIN 18 /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). 
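 *
 * Editor's sketch: with the ring.h macros, the usual way to make a
 * notification conditional on the peer's req_event/rsp_event is
 *
 *     int notify;
 *     ... enqueue request(s) on the private front ring ...
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&tx_ring, notify);
 *     if (notify)
 *         notify_remote_via_irq(tx_irq);
 *
 * tx_ring and tx_irq are placeholders for the frontend's private ring
 * state and its bound event-channel irq; they are not part of this
 * interface.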
*/ /* * "feature-split-event-channels" is introduced to separate guest TX * and RX notification. Backend either doesn't support this feature or * advertises it via xenstore as 0 (disabled) or 1 (enabled). * * To make use of this feature, frontend should allocate two event * channels for TX and RX, advertise them to backend as * "event-channel-tx" and "event-channel-rx" respectively. If frontend * doesn't want to use this feature, it just writes "event-channel" * node as before. */ /* * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum * offload off or on. If it is missing then the feature is assumed to be on. * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum * offload on or off. If it is missing then the feature is assumed to be off. */ /* * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither * frontends nor backends are assumed to be capable unless the flags are * present. */ /* * This is the 'wire' format for packets: * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info) * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data * ... * Request N: xen_netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _XEN_NETTXF_csum_blank (0) #define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _XEN_NETTXF_data_validated (1) #define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _XEN_NETTXF_more_data (2) #define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _XEN_NETTXF_extra_info (3) #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) #define XEN_NETIF_MAX_TX_SIZE 0xFFFF struct netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* XEN_NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; typedef struct netif_tx_request netif_tx_request_t; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MAX (4) /* xen_netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types */ #define XEN_NETIF_GSO_TYPE_NONE (0) #define XEN_NETIF_GSO_TYPE_TCPV4 (1) #define XEN_NETIF_GSO_TYPE_TCPV6 (2) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { /* * XEN_NETIF_EXTRA_TYPE_GSO: */ struct { /* * Maximum payload size of each segment. For * example, for TCP this is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of * the packet and any extra features required * to segment the packet properly. 
*/ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO * features required to process this packet, * such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; /* * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}: * Backend advertises availability via * 'feature-multicast-control' xenbus node containing value * '1'. * Frontend requests this feature by advertising * 'request-multicast-control' xenbus node containing value * '1'. If multicast control is requested then multicast * flooding is disabled and the frontend must explicitly * register its interest in multicast groups using dummy * transmit requests containing MCAST_{ADD,DEL} extra-info * fragments. */ struct { uint8_t addr[6]; /* Address to add/remove. */ } mcast; uint16_t pad[3]; } u; }; typedef struct netif_extra_info netif_extra_info_t; struct netif_tx_response { uint16_t id; int16_t status; /* XEN_NETIF_RSP_* */ }; typedef struct netif_tx_response netif_tx_response_t; struct netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; typedef struct netif_rx_request netif_rx_request_t; /* Packet data has been validated against protocol checksum. */ #define _XEN_NETRXF_data_validated (0) #define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _XEN_NETRXF_csum_blank (1) #define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _XEN_NETRXF_more_data (2) #define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _XEN_NETRXF_extra_info (3) #define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) /* GSO Prefix descriptor. */ #define _XEN_NETRXF_gso_prefix (4) #define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) struct netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* XEN_NETRXF_* */ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */ }; typedef struct netif_rx_response netif_rx_response_t; /* * Generate netif ring structures and types. */ #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response); DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response); #else #define xen_netif_tx_request netif_tx_request #define xen_netif_rx_request netif_rx_request #define xen_netif_tx_response netif_tx_response #define xen_netif_rx_response netif_rx_response DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); #define xen_netif_extra_info netif_extra_info #endif #define XEN_NETIF_RSP_DROPPED -2 #define XEN_NETIF_RSP_ERROR -1 #define XEN_NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
*/ #define XEN_NETIF_RSP_NULL 1 #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/pciif.h000066400000000000000000000070721314037446600322360ustar00rootroot00000000000000/* * PCI Backend/Frontend Common Data Structures & Macros * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Author: Ryan Wilson */ #ifndef __XEN_PCI_COMMON_H__ #define __XEN_PCI_COMMON_H__ /* Be sure to bump this number if you change this file */ #define XEN_PCI_MAGIC "7" /* xen_pci_sharedinfo flags */ #define _XEN_PCIF_active (0) #define XEN_PCIF_active (1<<_XEN_PCIF_active) #define _XEN_PCIB_AERHANDLER (1) #define XEN_PCIB_AERHANDLER (1<<_XEN_PCIB_AERHANDLER) #define _XEN_PCIB_active (2) #define XEN_PCIB_active (1<<_XEN_PCIB_active) /* xen_pci_op commands */ #define XEN_PCI_OP_conf_read (0) #define XEN_PCI_OP_conf_write (1) #define XEN_PCI_OP_enable_msi (2) #define XEN_PCI_OP_disable_msi (3) #define XEN_PCI_OP_enable_msix (4) #define XEN_PCI_OP_disable_msix (5) #define XEN_PCI_OP_aer_detected (6) #define XEN_PCI_OP_aer_resume (7) #define XEN_PCI_OP_aer_mmio (8) #define XEN_PCI_OP_aer_slotreset (9) #define XEN_PCI_OP_enable_multi_msi (10) /* xen_pci_op error numbers */ #define XEN_PCI_ERR_success (0) #define XEN_PCI_ERR_dev_not_found (-1) #define XEN_PCI_ERR_invalid_offset (-2) #define XEN_PCI_ERR_access_denied (-3) #define XEN_PCI_ERR_not_implemented (-4) /* XEN_PCI_ERR_op_failed - backend failed to complete the operation */ #define XEN_PCI_ERR_op_failed (-5) /* * it should be PAGE_SIZE-sizeof(struct xen_pci_op))/sizeof(struct msix_entry)) * Should not exceed 128 */ #define SH_INFO_MAX_VEC 128 struct xen_msix_entry { uint16_t vector; uint16_t entry; }; struct xen_pci_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /* OUT: will contain an error number (if any) from errno.h */ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment */ uint32_t bus; uint32_t devfn; /* IN: which configuration registers to touch */ int32_t offset; int32_t size; /* IN/OUT: Contains the result after a READ or the value to WRITE */ uint32_t value; /* IN: Contains extra infor for this operation */ uint32_t info; /*IN: param for msi-x */ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC]; }; /*used for pcie aer handling*/ struct xen_pcie_aer_op { /* IN: what action to perform: XEN_PCI_OP_* */ uint32_t cmd; /*IN/OUT: return aer_op result or carry error_detected state as input*/ int32_t err; /* IN: which device to touch */ uint32_t domain; /* PCI Domain/Segment*/ 
uint32_t bus; uint32_t devfn; }; struct xen_pci_sharedinfo { /* flags - XEN_PCIF_* */ uint32_t flags; struct xen_pci_op op; struct xen_pcie_aer_op aer_op; }; #endif /* __XEN_PCI_COMMON_H__ */ protocols.h000066400000000000000000000036471314037446600331150ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/****************************************************************************** * protocols.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #define XEN_IO_PROTO_ABI_ARM "arm-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #elif defined(__powerpc64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 #elif defined(__arm__) || defined(__aarch64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/ring.h000066400000000000000000000264201314037446600321010ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ #include "../xen-compat.h" #if __XEN_INTERFACE_VERSION__ < 0x00030208 #define xen_mb() mb() #define xen_rmb() rmb() #define xen_wmb() wmb() #endif typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) /* * The same for passing in an actual pointer instead of a name tag. */ #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say request_t, and response_t already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, request_t, response_t); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * mytag_sring_t - The shared ring. * mytag_front_ring_t - The 'front' half of the ring. * mytag_back_ring_t - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). 
To initialise * the front half: * * mytag_front_ring_t front_ring; * SHARED_RING_INIT((mytag_sring_t *)shared_page); * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * mytag_back_ring_t back_ring; * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ union { \ struct { \ uint8_t smartpoll_active; \ } netif; \ struct { \ uint8_t msg; \ } tapif_user; \ uint8_t pvt_pad[4]; \ } private; \ uint8_t __pad[44]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* Syntactic sugar */ \ typedef struct __name##_sring __name##_sring_t; \ typedef struct __name##_front_ring __name##_front_ring_t; \ typedef struct __name##_back_ring __name##_back_ring_t /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ (void)memset((_s)->private.pvt_pad, 0, sizeof((_s)->private.pvt_pad)); \ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #ifdef __GNUC__ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) #else /* Same as above, but without the nice GCC ({ ... }) syntax. */ #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ((((_r)->sring->req_prod - (_r)->req_cons) < \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? 
\ ((_r)->sring->req_prod - (_r)->req_cons) : \ (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) #endif /* Direct access to individual ring elements, by index. */ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) /* Ill-behaved frontend determination: Can there be this many requests? */ #define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. 
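 *
 * A frontend using the default behaviour therefore typically looks
 * like this (illustrative sketch; front_ring is a mytag_front_ring_t
 * as in the earlier example, and notify_backend() stands for whatever
 * event-channel primitive the caller has):
 *
 *     int notify, more;
 *
 *     ... queue requests with RING_GET_REQUEST() ...
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_backend();
 *
 *     ... later, once all visible responses have been consumed ...
 *     RING_FINAL_CHECK_FOR_RESPONSES(&front_ring, more);
 *     if (more)
 *         ... keep consuming instead of going to sleep ...
 *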
*/ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ xen_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ xen_wmb(); /* front sees resps /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ xen_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/tpmif.h000066400000000000000000000130261314037446600322570ustar00rootroot00000000000000/****************************************************************************** * tpmif.h * * TPM I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, IBM Corporation * * Author: Stefan Berger, stefanb@us.ibm.com * Grant table support: Mahadevan Gomathisankaran * * This code has been derived from tools/libxc/xen/io/netif.h * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_TPMIF_H__ #define __XEN_PUBLIC_IO_TPMIF_H__ #include "../grant_table.h" struct tpmif_tx_request { unsigned long addr; /* Machine address of packet. */ grant_ref_t ref; /* grant table access reference */ uint16_t unused; uint16_t size; /* Packet size in bytes. */ }; typedef struct tpmif_tx_request tpmif_tx_request_t; /* * The TPMIF_TX_RING_SIZE defines the number of pages the * front-end and backend can exchange (= size of array). 
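 * Since TPMIF_TX_RING_SIZE is fixed at 1 below, the v1 interface only
 * ever has a single request/response in flight at a time.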
*/ typedef uint32_t TPMIF_RING_IDX; #define TPMIF_TX_RING_SIZE 1 /* This structure must fit in a memory page. */ struct tpmif_ring { struct tpmif_tx_request req; }; typedef struct tpmif_ring tpmif_ring_t; struct tpmif_tx_interface { struct tpmif_ring ring[TPMIF_TX_RING_SIZE]; }; typedef struct tpmif_tx_interface tpmif_tx_interface_t; /****************************************************************************** * TPM I/O interface for Xen guest OSes, v2 * * Author: Daniel De Graaf * * This protocol emulates the request/response behavior of a TPM using a Xen * shared memory interface. All interaction with the TPM is at the direction * of the frontend, since a TPM (hardware or virtual) is a passive device - * the backend only processes commands as requested by the frontend. * * The frontend sends a request to the TPM by populating the shared page with * the request packet, changing the state to TPMIF_STATE_SUBMIT, and sending * and event channel notification. When the backend is finished, it will set * the state to TPMIF_STATE_FINISH and send an event channel notification. * * In order to allow long-running commands to be canceled, the frontend can * at any time change the state to TPMIF_STATE_CANCEL and send a notification. * The TPM can either finish the command (changing state to TPMIF_STATE_FINISH) * or can cancel the command and change the state to TPMIF_STATE_IDLE. The TPM * can also change the state to TPMIF_STATE_IDLE instead of TPMIF_STATE_FINISH * if another reason for cancellation is required - for example, a physical * TPM may cancel a command if the interface is seized by another locality. * * The TPM command format is defined by the TCG, and is available at * http://www.trustedcomputinggroup.org/resources/tpm_main_specification */ enum tpmif_state { TPMIF_STATE_IDLE, /* no contents / vTPM idle / cancel complete */ TPMIF_STATE_SUBMIT, /* request ready / vTPM working */ TPMIF_STATE_FINISH, /* response ready / vTPM idle */ TPMIF_STATE_CANCEL, /* cancel requested / vTPM working */ }; /* Note: The backend should only change state to IDLE or FINISH, while the * frontend should only change to SUBMIT or CANCEL. Status changes do not need * to use atomic operations. */ /* The shared page for vTPM request/response packets looks like: * * Offset Contents * ================================================= * 0 struct tpmif_shared_page * 16 [optional] List of grant IDs * 16+4*nr_extra_pages TPM packet data * * If the TPM packet data extends beyond the end of a single page, the grant IDs * defined in extra_pages are used as if they were mapped immediately following * the primary shared page. The grants are allocated by the frontend and mapped * by the backend. Before sending a request spanning multiple pages, the * frontend should verify that the TPM supports such large requests by querying * the TPM_CAP_PROP_INPUT_BUFFER property from the TPM. 
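 *
 * Putting the above together, a frontend round trip looks roughly like
 * this (illustrative sketch; 'shr' is the mapped shared page, 'data'
 * points at the packet area described above, and notify() /
 * wait_for_backend() are hypothetical helpers):
 *
 *     memcpy(data, cmd, cmd_len);
 *     shr->length = cmd_len;
 *     shr->state = TPMIF_STATE_SUBMIT;
 *     notify();
 *     while (shr->state == TPMIF_STATE_SUBMIT)
 *         wait_for_backend();
 *     if (shr->state == TPMIF_STATE_FINISH)
 *         ... response of shr->length bytes is now in the packet area ...
 *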
*/ struct tpmif_shared_page { uint32_t length; /* request/response length in bytes */ uint8_t state; /* enum tpmif_state */ uint8_t locality; /* for the current request */ uint8_t pad; /* should be zero */ uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */ }; typedef struct tpmif_shared_page tpmif_shared_page_t; #endif /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/usbif.h000066400000000000000000000105061314037446600322500ustar00rootroot00000000000000/* * usbif.h * * USB I/O interface for Xen guest OSes. * * Copyright (C) 2009, FUJITSU LABORATORIES LTD. * Author: Noboru Iwamatsu * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_IO_USBIF_H__ #define __XEN_PUBLIC_IO_USBIF_H__ #include "ring.h" #include "../grant_table.h" enum usb_spec_version { USB_VER_UNKNOWN = 0, USB_VER_USB11, USB_VER_USB20, USB_VER_USB30, /* not supported yet */ }; /* * USB pipe in usbif_request * * bits 0-5 are specific bits for virtual USB driver. * bits 7-31 are standard urb pipe. * * - port number(NEW): bits 0-4 * (USB_MAXCHILDREN is 31) * * - operation flag(NEW): bit 5 * (0 = submit urb, * 1 = unlink urb) * * - direction: bit 7 * (0 = Host-to-Device [Out] * 1 = Device-to-Host [In]) * * - device address: bits 8-14 * * - endpoint: bits 15-18 * * - pipe type: bits 30-31 * (00 = isochronous, 01 = interrupt, * 10 = control, 11 = bulk) */ #define usbif_pipeportnum(pipe) ((pipe) & 0x1f) #define usbif_setportnum_pipe(pipe, portnum) \ ((pipe)|(portnum)) #define usbif_pipeunlink(pipe) ((pipe) & 0x20) #define usbif_pipesubmit(pipe) (!usbif_pipeunlink(pipe)) #define usbif_setunlink_pipe(pipe) ((pipe)|(0x20)) #define USBIF_MAX_SEGMENTS_PER_REQUEST (16) /* * RING for transferring urbs. 
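 *
 * As an illustration of the pipe encoding above, a backend examining
 * an incoming request might do (sketch; 'req' is a usbif_urb_request):
 *
 *     portnum = usbif_pipeportnum(req->pipe);      (bits 0-4)
 *     if (usbif_pipeunlink(req->pipe))
 *         ... cancel the urb named by req->u.unlink.unlink_id ...
 *     else
 *         ... submit a new urb for that port ...
 *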
*/ struct usbif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; struct usbif_urb_request { uint16_t id; /* request id */ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */ /* basic urb parameter */ uint32_t pipe; uint16_t transfer_flags; uint16_t buffer_length; union { uint8_t ctrl[8]; /* setup_packet (Ctrl) */ struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t start_frame; /* start frame */ uint16_t number_of_packets; /* number of ISO packet */ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */ } isoc; struct { uint16_t interval; /* maximum (1024*8) in usb core */ uint16_t pad[3]; } intr; struct { uint16_t unlink_id; /* unlink request id */ uint16_t pad[3]; } unlink; } u; /* urb data segments */ struct usbif_request_segment seg[USBIF_MAX_SEGMENTS_PER_REQUEST]; }; typedef struct usbif_urb_request usbif_urb_request_t; struct usbif_urb_response { uint16_t id; /* request id */ uint16_t start_frame; /* start frame (ISO) */ int32_t status; /* status (non-ISO) */ int32_t actual_length; /* actual transfer length */ int32_t error_count; /* number of ISO errors */ }; typedef struct usbif_urb_response usbif_urb_response_t; DEFINE_RING_TYPES(usbif_urb, struct usbif_urb_request, struct usbif_urb_response); #define USB_URB_RING_SIZE __CONST_RING_SIZE(usbif_urb, PAGE_SIZE) /* * RING for notifying connect/disconnect events to frontend */ struct usbif_conn_request { uint16_t id; }; typedef struct usbif_conn_request usbif_conn_request_t; struct usbif_conn_response { uint16_t id; /* request id */ uint8_t portnum; /* port number */ uint8_t speed; /* usb_device_speed */ }; typedef struct usbif_conn_response usbif_conn_response_t; DEFINE_RING_TYPES(usbif_conn, struct usbif_conn_request, struct usbif_conn_response); #define USB_CONN_RING_SIZE __CONST_RING_SIZE(usbif_conn, PAGE_SIZE) #endif /* __XEN_PUBLIC_IO_USBIF_H__ */ vscsiif.h000066400000000000000000000135041314037446600325300ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. 
*/ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include #include /* commands between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_ACT_SCSI_SG_PRESET 4 /* Preset SG elements */ /* indirect: indirect requestڴΣֵҲbackend */ #define MAX_VSCSI_INDIRECT_SEGMENTS 128U /* indirect: ҳܱ512 */ #define SEGS_PER_VSCSI_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct scsiif_request_segment_aligned)) /* indirect: ĿǰSCSIIF_MAX_SEGMENTS_PER_REQUEST 8 ʹindirect * ÿСʹSCSIIF_MAX_SEGMENTS_PER_REQUEST(8)ҳ * Ŀǰֻʹõ1ҳ(256 * 4K)Ѿ㹻 */ #define MAX_VSCSI_INDIRECT_PAGES \ ((MAX_VSCSI_INDIRECT_SEGMENTS + SEGS_PER_VSCSI_INDIRECT_FRAME - 1)/SEGS_PER_VSCSI_INDIRECT_FRAME) /* indirect: 㵱ǰindirect pageĿǰֻʹ 1 */ #define VSCSI_INDIRECT_PAGES(_segs) \ ((_segs + SEGS_PER_VSCSI_INDIRECT_FRAME - 1)/SEGS_PER_VSCSI_INDIRECT_FRAME) /* * Maximum scatter/gather segments per request. * * Considering balance between allocating at least 16 "vscsiif_request" * structures on one page (4096 bytes) and the number of scatter/gather * elements needed, we decided to use 26 as a magic number. */ #define VSCSIIF_SG_TABLESIZE 26 /* indirect: ÿindirect requestֻʹõindirect page */ #define SCSIIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 /* * based on Linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 /* indirect: indirect pageбĽṹ壬ҳ */ struct scsiif_request_segment_aligned { grant_ref_t gref; uint16_t offset; uint16_t length; } __attribute__((__packed__)); struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; }; typedef struct scsiif_request_segment vscsiif_segment_t; /* */ struct vscsiif_request_rw { uint8_t nr_segments; /* Number of pieces of scatter-gather */ vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; } __attribute__((__packed__)); /* indirect: indirect */ struct vscsiif_request_indirect { uint8_t indirect_op; grant_ref_t indirect_grefs[SCSIIF_MAX_INDIRECT_PAGES_PER_REQUEST]; uint16_t nr_segments; uint16_t _pad1; uint32_t reserved[3]; } __attribute__((__packed__)); /* е */ struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ union { struct vscsiif_request_rw rw; struct vscsiif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct vscsiif_request vscsiif_request_t; #define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) \ / sizeof(vscsiif_segment_t)) struct vscsiif_sg_list { /* First two fields must match struct vscsiif_request! 
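 * That way the backend can tell an SG_PRESET slot from an ordinary
 * request just by looking at 'act', and can pair the extra
 * scatter-gather elements with the main request via the shared rqid.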
*/ uint16_t rqid; /* private guest value, must match main req */ uint8_t act; /* VSCSIIF_ACT_SCSI_SG_PRESET */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE]; }; typedef struct vscsiif_sg_list vscsiif_sg_list_t; struct vscsiif_response { uint16_t rqid; uint8_t act; /* valid only when backend supports SG_PRESET */ uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ xenbus.h000066400000000000000000000024501314037446600323640ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* The state of either end of the Xenbus, i.e. the current communication status of initialisation across the bus. States here imply nothing about the state of the connection between the driver and the kernel's device layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, XenbusStateInitWait = 2, /* Finished early initialisation, but waiting for information from the peer or hotplug scripts. */ XenbusStateInitialised = 3, /* Initialised and waiting for a connection from the peer. */ XenbusStateConnected = 4, XenbusStateClosing = 5, /* The device is being closed due to an error or an unplug event. */ XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; typedef enum xenbus_state XenbusState; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * c-file-style: "linux" * indent-tabs-mode: t * c-indent-level: 8 * c-basic-offset: 8 * tab-width: 8 * End: */ xs_wire.h000066400000000000000000000070071314037446600325430ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/io/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET, XS_RESTRICT, XS_RESET_WATCHES }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #ifdef EINVAL #define XSD_ERROR(x) { x, #x } /* LINTED: static unused */ static struct xsd_errors xsd_errors[] #if defined(__GNUC__) __attribute__((unused)) #endif = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN), XSD_ERROR(E2BIG) }; #endif struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* * `incontents 150 xenstore_struct XenStore wire protocol. * * Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ #define XENSTORE_PAYLOAD_MAX 4096 /* Violating these just gets you an error back */ #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 #endif /* _XS_WIRE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/kexec.h000066400000000000000000000215531314037446600316340ustar00rootroot00000000000000/****************************************************************************** * kexec.h - Public portion * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Xen port written by: * - Simon 'Horms' Horman * - Magnus Damm */ #ifndef _XEN_PUBLIC_KEXEC_H #define _XEN_PUBLIC_KEXEC_H /* This file describes the Kexec / Kdump hypercall interface for Xen. * * Kexec under vanilla Linux allows a user to reboot the physical machine * into a new user-specified kernel. The Xen port extends this idea * to allow rebooting of the machine from dom0. When kexec for dom0 * is used to reboot, both the hypervisor and the domains get replaced * with some other kernel. It is possible to kexec between vanilla * Linux and Xen and back again. Xen to Xen works well too. * * The hypercall interface for kexec can be divided into three main * types of hypercall operations: * * 1) Range information: * This is used by the dom0 kernel to ask the hypervisor about various * address information. This information is needed to allow kexec-tools * to fill in the ELF headers for /proc/vmcore properly. * * 2) Load and unload of images: * There are no big surprises here, the kexec binary from kexec-tools * runs in userspace in dom0. The tool loads/unloads data into the * dom0 kernel such as new kernel, initramfs and hypervisor. When * loaded the dom0 kernel performs a load hypercall operation, and * before releasing all page references the dom0 kernel calls unload. * * 3) Kexec operation: * This is used to start a previously loaded kernel. */ #include "xen.h" #if defined(__i386__) || defined(__x86_64__) #define KEXEC_XEN_NO_PAGES 17 #endif /* * Prototype for this hypercall is: * int kexec_op(int cmd, void *args) * @cmd == KEXEC_CMD_... * KEXEC operation to perform * @args == Operation-specific extra arguments (NULL if none). */ /* * Kexec supports two types of operation: * - kexec into a regular kernel, very similar to a standard reboot * - KEXEC_TYPE_DEFAULT is used to specify this type * - kexec into a special "crash kernel", aka kexec-on-panic * - KEXEC_TYPE_CRASH is used to specify this type * - parts of our system may be broken at kexec-on-panic time * - the code should be kept as simple and self-contained as possible */ #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 /* The kexec implementation for Xen allows the user to load two * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH. * All data needed for a kexec reboot is kept in one xen_kexec_image_t * per "instance". The data mainly consists of machine address lists to pages * together with destination addresses. The data in xen_kexec_image_t * is passed to the "code page" which is one page of code that performs * the final relocations before jumping to the new kernel. */ typedef struct xen_kexec_image { #if defined(__i386__) || defined(__x86_64__) unsigned long page_list[KEXEC_XEN_NO_PAGES]; #endif #if defined(__ia64__) unsigned long reboot_code_buffer; #endif unsigned long indirection_page; unsigned long start_address; } xen_kexec_image_t; /* * Perform kexec having previously loaded a kexec or kdump kernel * as appropriate. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * * Control is transferred to the image entry point with the host in * the following state. * * - The image may be executed on any PCPU and all other PCPUs are * stopped. * * - Local interrupts are disabled. * * - Register values are undefined. 
* * - The image segments have writeable 1:1 virtual to machine * mappings. The location of any page tables is undefined and these * page table frames are not be mapped. */ #define KEXEC_CMD_kexec 0 typedef struct xen_kexec_exec { int type; } xen_kexec_exec_t; /* * Load/Unload kernel image for kexec or kdump. * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in] * image == relocation information for kexec (ignored for unload) [in] */ #define KEXEC_CMD_kexec_load_v1 1 /* obsolete since 0x00040400 */ #define KEXEC_CMD_kexec_unload_v1 2 /* obsolete since 0x00040400 */ typedef struct xen_kexec_load_v1 { int type; xen_kexec_image_t image; } xen_kexec_load_v1_t; #define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */ #define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */ #define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */ #define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap * Note that although this is adjacent * to Xen it exists in a separate EFI * region on ia64, and thus needs to be * inserted into iomem_machine separately */ #define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of * the ia64_boot_param */ #define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size of * of the EFI Memory Map */ #define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */ /* * Find the address and size of certain memory areas * range == KEXEC_RANGE_... [in] * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in] * size == number of bytes reserved in window [out] * start == address of the first byte in the window [out] */ #define KEXEC_CMD_kexec_get_range 3 typedef struct xen_kexec_range { int range; int nr; unsigned long size; unsigned long start; } xen_kexec_range_t; #if __XEN_INTERFACE_VERSION__ >= 0x00040400 /* * A contiguous chunk of a kexec image and it's destination machine * address. */ typedef struct xen_kexec_segment { union { XEN_GUEST_HANDLE(const_void) h; uint64_t _pad; } buf; uint64_t buf_size; uint64_t dest_maddr; uint64_t dest_size; } xen_kexec_segment_t; DEFINE_XEN_GUEST_HANDLE(xen_kexec_segment_t); /* * Load a kexec image into memory. * * For KEXEC_TYPE_DEFAULT images, the segments may be anywhere in RAM. * The image is relocated prior to being executed. * * For KEXEC_TYPE_CRASH images, each segment of the image must reside * in the memory region reserved for kexec (KEXEC_RANGE_MA_CRASH) and * the entry point must be within the image. The caller is responsible * for ensuring that multiple images do not overlap. * * All image segments will be loaded to their destination machine * addresses prior to being executed. The trailing portion of any * segments with a source buffer (from dest_maddr + buf_size to * dest_maddr + dest_size) will be zeroed. * * Segments with no source buffer will be accessible to the image when * it is executed. */ #define KEXEC_CMD_kexec_load 4 typedef struct xen_kexec_load { uint8_t type; /* One of KEXEC_TYPE_* */ uint8_t _pad; uint16_t arch; /* ELF machine type (EM_*). */ uint32_t nr_segments; union { XEN_GUEST_HANDLE(xen_kexec_segment_t) h; uint64_t _pad; } segments; uint64_t entry_maddr; /* image entry point machine address. */ } xen_kexec_load_t; DEFINE_XEN_GUEST_HANDLE(xen_kexec_load_t); /* * Unload a kexec image. * * Type must be one of KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH. 
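 *
 * For example, a dom0 kernel discarding a previously loaded crash
 * image might issue (illustrative sketch; kexec_op() stands for the
 * guest's wrapper around the hypercall prototype given above):
 *
 *     xen_kexec_unload_t unload = { .type = KEXEC_TYPE_CRASH };
 *     rc = kexec_op(KEXEC_CMD_kexec_unload, &unload);
 *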
*/ #define KEXEC_CMD_kexec_unload 5 typedef struct xen_kexec_unload { uint8_t type; } xen_kexec_unload_t; DEFINE_XEN_GUEST_HANDLE(xen_kexec_unload_t); #else /* __XEN_INTERFACE_VERSION__ < 0x00040400 */ #define KEXEC_CMD_kexec_load KEXEC_CMD_kexec_load_v1 #define KEXEC_CMD_kexec_unload KEXEC_CMD_kexec_unload_v1 #define xen_kexec_load xen_kexec_load_v1 #define xen_kexec_load_t xen_kexec_load_v1_t #endif #endif /* _XEN_PUBLIC_KEXEC_H */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ mem_event.h000066400000000000000000000056441314037446600324400ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * mem_event.h * * Memory event common structures. * * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #ifndef _XEN_PUBLIC_MEM_EVENT_H #define _XEN_PUBLIC_MEM_EVENT_H #include "xen.h" #include "io/ring.h" /* Memory event flags */ #define MEM_EVENT_FLAG_VCPU_PAUSED (1 << 0) #define MEM_EVENT_FLAG_DROP_PAGE (1 << 1) #define MEM_EVENT_FLAG_EVICT_FAIL (1 << 2) #define MEM_EVENT_FLAG_FOREIGN (1 << 3) #define MEM_EVENT_FLAG_DUMMY (1 << 4) /* Reasons for the memory event request */ #define MEM_EVENT_REASON_UNKNOWN 0 /* typical reason */ #define MEM_EVENT_REASON_VIOLATION 1 /* access violation, GFN is address */ #define MEM_EVENT_REASON_CR0 2 /* CR0 was hit: gfn is CR0 value */ #define MEM_EVENT_REASON_CR3 3 /* CR3 was hit: gfn is CR3 value */ #define MEM_EVENT_REASON_CR4 4 /* CR4 was hit: gfn is CR4 value */ #define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */ #define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */ #define MEM_EVENT_REASON_MSR 7 /* MSR was hit: gfn is MSR value, gla is MSR address; does NOT honour HVMPME_onchangeonly */ typedef struct mem_event_st { uint32_t flags; uint32_t vcpu_id; uint64_t gfn; uint64_t offset; uint64_t gla; /* if gla_valid */ uint32_t p2mt; uint16_t access_r:1; uint16_t access_w:1; uint16_t access_x:1; uint16_t gla_valid:1; uint16_t available:12; uint16_t reason; } mem_event_request_t, mem_event_response_t; DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); #endif /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/memory.h000066400000000000000000000425401314037446600320440ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include "xen.h" /* * Increase or decrease the specified domain's memory reservation. Returns a * -ve errcode on failure, or the # extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* * Maximum # bits addressable by the user of the allocated region (e.g., I/O * devices often have a 32-bit limitation even in 64-bit systems). If zero * then the user has no addressing restriction. 
This field is not used by * XENMEM_decrease_reservation. */ #define XENMEMF_address_bits(x) (x) #define XENMEMF_get_address_bits(x) ((x) & 0xffu) /* NUMA node to allocate from. */ #define XENMEMF_node(x) (((x) + 1) << 8) #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu) /* Flag to populate physmap with populate-on-demand entries */ #define XENMEMF_populate_on_demand (1<<16) /* Flag to request allocation only from the node specified */ #define XENMEMF_exact_node_request (1<<17) #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request) #endif struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) * XENMEM_claim_pages: * IN: must be zero */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ xen_ulong_t nr_extents; unsigned int extent_order; #if __XEN_INTERFACE_VERSION__ >= 0x00030209 /* XENMEMF flags. */ unsigned int mem_flags; #else /* * Maximum # bits addressable by the user of the allocated region (e.g., * I/O devices often have a 32-bit limitation even in 64-bit systems). If * zero then the user has no addressing restriction. * This field is not used by XENMEM_decrease_reservation. */ unsigned int address_bits; #endif /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); typedef struct xen_memory_reservation xen_memory_reservation_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t); /* * An atomic exchange of memory pages. If return code is zero then * @out.extent_list provides GMFNs of the newly-allocated memory. * Returns zero on complete success, otherwise a negative error code. * On complete success then always @nr_exchanged == @in.nr_extents. * On partial success @nr_exchanged indicates how much work was done. */ #define XENMEM_exchange 11 struct xen_memory_exchange { /* * [IN] Details of memory extents to be exchanged (GMFN bases). * Note that @in.address_bits is ignored and unused. */ struct xen_memory_reservation in; /* * [IN/OUT] Details of new memory extents. * We require that: * 1. @in.domid == @out.domid * 2. @in.nr_extents << @in.extent_order == * @out.nr_extents << @out.extent_order * 3. @in.extent_start and @out.extent_start lists must not overlap * 4. @out.extent_start lists GPFN bases to be populated * 5. @out.extent_start is overwritten with allocated GMFN bases */ struct xen_memory_reservation out; /* * [OUT] Number of input extents that were successfully exchanged: * 1. The first @nr_exchanged input extents were successfully * deallocated. * 2. The corresponding first entries in the output extent list correctly * indicate the GMFNs that were successfully exchanged. * 3. All other input and output extents are untouched. * 4. If not all input exents are exchanged then the return code of this * command will be non-zero. * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER! */ xen_ulong_t nr_exchanged; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_exchange); typedef struct xen_memory_exchange xen_memory_exchange_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t); /* * Returns the maximum machine frame number of mapped RAM in this system. 
* This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. */ #define XENMEM_maximum_gpfn 14 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ XEN_GUEST_HANDLE(xen_pfn_t) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); /* * Returns the location in virtual address space of the machine_to_phys * mapping table. Architectures which do not have a m2p table, or which do not * map it by default into guest address space, do not implement this command. * arg == addr of xen_machphys_mapping_t. */ #define XENMEM_machphys_mapping 12 struct xen_machphys_mapping { xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */ xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping); typedef struct xen_machphys_mapping xen_machphys_mapping_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); /* Source mapping space. */ /* ` enum phys_map_space { */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */ #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, * XENMEM_add_to_physmap_batch only. */ /* ` } */ /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Number of pages to go through for gmfn_range */ uint16_t size; unsigned int space; /* => enum phys_map_space */ #define XENMAPIDX_grant_table_status 0x80000000 /* Index into space being mapped. */ xen_ulong_t idx; /* GPFN in domid where the source mapping page should appear. */ xen_pfn_t gpfn; }; DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /* A batched version of add_to_physmap. */ #define XENMEM_add_to_physmap_batch 23 struct xen_add_to_physmap_batch { /* IN */ /* Which domain to change the mapping for. 
*/ domid_t domid; uint16_t space; /* => enum phys_map_space */ /* Number of pages to go through */ uint16_t size; domid_t foreign_domid; /* IFF gmfn_foreign */ /* Indexes into space being mapped. */ XEN_GUEST_HANDLE(xen_ulong_t) idxs; /* GPFN in domid where the source mapping page should appear. */ XEN_GUEST_HANDLE(xen_pfn_t) gpfns; /* OUT */ /* Per index error code. */ XEN_GUEST_HANDLE(int) errs; }; DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_batch); typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t); #if defined(CONFIG_PARAVIRT_XEN) || __XEN_INTERFACE_VERSION__ < 0x00040400 #define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch __DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range, struct xen_add_to_physmap_batch); #define xen_add_to_physmap_range xen_add_to_physmap_batch typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t); #endif /* * Unmaps the page appearing at a particular GPFN from the specified guest's * pseudophysical address space. * arg == addr of xen_remove_from_physmap_t. */ #define XENMEM_remove_from_physmap 15 struct xen_remove_from_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* GPFN of the current mapping of the page. */ xen_pfn_t gpfn; }; DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap); typedef struct xen_remove_from_physmap xen_remove_from_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t); /*** REMOVED ***/ /*#define XENMEM_translate_gpfn_list 8*/ /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of struct xen_memory_map. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ XEN_GUEST_HANDLE(void) buffer; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); typedef struct xen_memory_map xen_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of struct xen_memory_map. */ #define XENMEM_machine_memory_map 10 /* * Set the pseudo-physical memory map of a domain, as returned by * XENMEM_memory_map. * arg == addr of xen_foreign_memory_map_t. */ #define XENMEM_set_memory_map 13 struct xen_foreign_memory_map { domid_t domid; struct xen_memory_map map; }; typedef struct xen_foreign_memory_map xen_foreign_memory_map_t; DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t); #define XENMEM_set_pod_target 16 #define XENMEM_get_pod_target 17 struct xen_pod_target { /* IN */ uint64_t target_pages; /* OUT */ uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; /* IN */ domid_t domid; }; typedef struct xen_pod_target xen_pod_target_t; #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif /* * Get the number of MFNs saved through memory sharing. * The call never fails. 
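 *
 * (Referring back to XENMEM_add_to_physmap above: a classic use is a
 *  PV-on-HVM guest placing the shared info page at a pfn of its choosing.
 *  Illustrative sketch only, assuming the guest's HYPERVISOR_memory_op()
 *  wrapper and a guest-chosen free pfn 'shared_info_pfn', neither of which
 *  is defined by this header:
 *
 *      struct xen_add_to_physmap xatp = {
 *          .domid = DOMID_SELF,
 *          .space = XENMAPSPACE_shared_info,
 *          .idx   = 0,
 *          .gpfn  = shared_info_pfn,
 *      };
 *      rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 *      // rc == 0 on success; the shared info page then appears at
 *      // shared_info_pfn in the guest's pseudophysical address space.)
 *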
*/ #define XENMEM_get_sharing_freed_pages 18 #define XENMEM_get_sharing_shared_pages 19 #define XENMEM_paging_op 20 #define XENMEM_paging_op_nominate 0 #define XENMEM_paging_op_evict 1 #define XENMEM_paging_op_prep 2 #define XENMEM_access_op 21 #define XENMEM_access_op_resume 0 struct xen_mem_event_op { uint8_t op; /* XENMEM_*_op_* */ domid_t domain; /* PAGING_PREP IN: buffer to immediately fill page in */ uint64_aligned_t buffer; /* Other OPs */ uint64_aligned_t gfn; /* IN: gfn of page being operated on */ }; typedef struct xen_mem_event_op xen_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t); #define XENMEM_sharing_op 22 #define XENMEM_sharing_op_nominate_gfn 0 #define XENMEM_sharing_op_nominate_gref 1 #define XENMEM_sharing_op_share 2 #define XENMEM_sharing_op_resume 3 #define XENMEM_sharing_op_debug_gfn 4 #define XENMEM_sharing_op_debug_mfn 5 #define XENMEM_sharing_op_debug_gref 6 #define XENMEM_sharing_op_add_physmap 7 #define XENMEM_sharing_op_audit 8 #define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10) #define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9) /* The following allows sharing of grant refs. This is useful * for sharing utilities sitting as "filters" in IO backends * (e.g. memshr + blktap(2)). The IO backend is only exposed * to grant references, and this allows sharing of the grefs */ #define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62) #define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \ (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val) #define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \ ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG) #define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \ ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)) struct xen_mem_sharing_op { uint8_t op; /* XENMEM_sharing_op_* */ domid_t domain; union { struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to nominate */ uint32_t grant_ref; /* IN: grant ref to nominate */ } u; uint64_aligned_t handle; /* OUT: the handle */ } nominate; struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */ uint64_aligned_t source_gfn; /* IN: the gfn of the source page */ uint64_aligned_t source_handle; /* IN: handle to the source page */ uint64_aligned_t client_gfn; /* IN: the client gfn */ uint64_aligned_t client_handle; /* IN: handle to the client page */ domid_t client_domain; /* IN: the client domain id */ } share; struct mem_sharing_op_debug { /* OP_DEBUG_xxx */ union { uint64_aligned_t gfn; /* IN: gfn to debug */ uint64_aligned_t mfn; /* IN: mfn to debug */ uint32_t gref; /* IN: gref to debug */ } u; } debug; } u; }; typedef struct xen_mem_sharing_op xen_mem_sharing_op_t; DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t); /* * Attempt to stake a claim for a domain on a quantity of pages * of system RAM, but _not_ assign specific pageframes. Only * arithmetic is performed so the hypercall is very fast and need * not be preemptible, thus sidestepping time-of-check-time-of-use * races for memory allocation. Returns 0 if the hypervisor page * allocator has atomically and successfully claimed the requested * number of pages, else non-zero. * * Any domain may have only one active claim. When sufficient memory * has been allocated to resolve the claim, the claim silently expires. * Claiming zero pages effectively resets any outstanding claim and * is always successful. * * Note that a valid claim may be staked even after memory has been * allocated for a domain. In this case, the claim is not incremental, * i.e. 
if the domain's tot_pages is 3, and a claim is staked for 10, * only 7 additional pages are claimed. * * Caller must be privileged or the hypercall fails. */ #define XENMEM_claim_pages 24 /* * XENMEM_claim_pages flags - the are no flags at this time. * The zero value is appropiate. */ #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #ifndef CONFIG_XEN #include /* * Prevent the balloon driver from changing the memory reservation * during a driver critical region. */ extern spinlock_t xen_reservation_lock; #endif #endif /* __XEN_PUBLIC_MEMORY_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/nmi.h000066400000000000000000000056671314037446600313300ustar00rootroot00000000000000/****************************************************************************** * nmi.h * * NMI callback registration and reason codes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_NMI_H__ #define __XEN_PUBLIC_NMI_H__ #include "xen.h" /* * NMI reason codes: * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. */ /* I/O-check error reported via ISA port 0x61, bit 6. */ #define _XEN_NMIREASON_io_error 0 #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) /* PCI SERR reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_pci_serr 1 #define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) #if __XEN_INTERFACE_VERSION__ < 0x00040300 /* legacy alias of the above */ /* Parity error reported via ISA port 0x61, bit 7. */ #define _XEN_NMIREASON_parity_error 1 #define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error) #endif /* Unknown hardware-generated NMI. */ #define _XEN_NMIREASON_unknown 2 #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) /* * long nmi_op(unsigned int cmd, void *arg) * NB. All ops return zero on success, else a negative error code. */ /* * Register NMI callback for this (calling) VCPU. Currently this only makes * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. * arg == pointer to xennmi_callback structure. */ #define XENNMI_register_callback 0 struct xennmi_callback { unsigned long handler_address; unsigned long pad; }; typedef struct xennmi_callback xennmi_callback_t; DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t); /* * Deregister NMI callback for this (calling) VCPU. * arg == NULL. 
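 *
 * (Both registration and deregistration are issued through the guest's
 *  HYPERVISOR_nmi_op(cmd, arg) wrapper, which is declared in the guest's
 *  hypercall headers, not here.  Illustrative sketch, where 'nmi_entry' is
 *  a hypothetical guest NMI entry point:
 *
 *      struct xennmi_callback cb = {
 *          .handler_address = (unsigned long)nmi_entry,
 *          .pad = 0,
 *      };
 *      rc = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
 *      // ... later ...
 *      rc = HYPERVISOR_nmi_op(XENNMI_unregister_callback, NULL);)
 *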
*/ #define XENNMI_unregister_callback 1 #endif /* __XEN_PUBLIC_NMI_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/physdev.h000066400000000000000000000243331314037446600322160ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ #include "xen.h" /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; typedef struct physdev_eoi physdev_eoi_t; DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t); /* * Register a shared page for the hypervisor to indicate whether the guest * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly * once the guest used this function in that the associated event channel * will automatically get unmasked. The page registered is used as a bit * array indexed by Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn_v1 17 /* * Register a shared page for the hypervisor to indicate whether the * guest must issue PHYSDEVOP_eoi. This hypercall is very similar to * PHYSDEVOP_pirq_eoi_gmfn_v1 but it doesn't change the semantics of * PHYSDEVOP_eoi. The page registered is used as a bit array indexed by * Xen's PIRQ value. */ #define PHYSDEVOP_pirq_eoi_gmfn_v2 28 struct physdev_pirq_eoi_gmfn { /* IN */ xen_pfn_t gmfn; }; typedef struct physdev_pirq_eoi_gmfn physdev_pirq_eoi_gmfn_t; DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_gmfn_t); /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; typedef struct physdev_irq_status_query physdev_irq_status_query_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t); /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. 
* @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; typedef struct physdev_set_iopl physdev_set_iopl_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t); /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(uint8) bitmap; #else uint8_t * bitmap; #endif uint32_t nr_ports; }; typedef struct physdev_set_iobitmap physdev_set_iobitmap_t; DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t); /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; typedef struct physdev_apic physdev_apic_t; DEFINE_XEN_GUEST_HANDLE(physdev_apic_t); /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; typedef struct physdev_irq physdev_irq_t; DEFINE_XEN_GUEST_HANDLE(physdev_irq_t); #define MAP_PIRQ_TYPE_MSI 0x0 #define MAP_PIRQ_TYPE_GSI 0x1 #define MAP_PIRQ_TYPE_UNKNOWN 0x2 #define MAP_PIRQ_TYPE_MSI_SEG 0x3 #define MAP_PIRQ_TYPE_MULTI_MSI 0x4 #define PHYSDEVOP_map_pirq 13 struct physdev_map_pirq { domid_t domid; /* IN */ int type; /* IN (ignored for ..._MULTI_MSI) */ int index; /* IN or OUT */ int pirq; /* IN - high 16 bits hold segment for ..._MSI_SEG and ..._MULTI_MSI */ int bus; /* IN */ int devfn; /* IN (also OUT for ..._MULTI_MSI) */ int entry_nr; /* IN */ uint64_t table_base; }; typedef struct physdev_map_pirq physdev_map_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t); #define PHYSDEVOP_unmap_pirq 14 struct physdev_unmap_pirq { domid_t domid; /* IN */ int pirq; }; typedef struct physdev_unmap_pirq physdev_unmap_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t); #define PHYSDEVOP_manage_pci_add 15 #define PHYSDEVOP_manage_pci_remove 16 struct physdev_manage_pci { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_manage_pci physdev_manage_pci_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t); #define PHYSDEVOP_restore_msi 19 struct physdev_restore_msi { /* IN */ uint8_t bus; uint8_t devfn; }; typedef struct physdev_restore_msi physdev_restore_msi_t; DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t); #define PHYSDEVOP_manage_pci_add_ext 20 struct physdev_manage_pci_ext { /* IN */ uint8_t bus; uint8_t devfn; unsigned is_extfn; unsigned is_virtfn; struct { uint8_t bus; uint8_t devfn; } physfn; }; typedef struct physdev_manage_pci_ext physdev_manage_pci_ext_t; DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_ext_t); /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. 
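 *
 * (With the current interface each command is passed directly to the
 *  guest's HYPERVISOR_physdev_op(cmd, arg) wrapper -- an illustrative
 *  sketch, the wrapper itself being declared in the hypercall headers
 *  rather than here:
 *
 *      struct physdev_set_iopl iopl = { .iopl = 1 };
 *      rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &iopl);
 *
 *  The compat hypercall instead wraps the same payloads in the struct
 *  physdev_op defined below, with the command held in its 'cmd' field.)
 *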
*/ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; typedef struct physdev_op physdev_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_op_t); #define PHYSDEVOP_setup_gsi 21 struct physdev_setup_gsi { int gsi; /* IN */ uint8_t triggering; /* IN */ uint8_t polarity; /* IN */ }; typedef struct physdev_setup_gsi physdev_setup_gsi_t; DEFINE_XEN_GUEST_HANDLE(physdev_setup_gsi_t); /* leave PHYSDEVOP 22 free */ /* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI * the hypercall returns a free pirq */ #define PHYSDEVOP_get_free_pirq 23 struct physdev_get_free_pirq { /* IN */ int type; /* OUT */ uint32_t pirq; }; typedef struct physdev_get_free_pirq physdev_get_free_pirq_t; DEFINE_XEN_GUEST_HANDLE(physdev_get_free_pirq_t); #define XEN_PCI_MMCFG_RESERVED 0x1 #define PHYSDEVOP_pci_mmcfg_reserved 24 struct physdev_pci_mmcfg_reserved { uint64_t address; uint16_t segment; uint8_t start_bus; uint8_t end_bus; uint32_t flags; }; typedef struct physdev_pci_mmcfg_reserved physdev_pci_mmcfg_reserved_t; DEFINE_XEN_GUEST_HANDLE(physdev_pci_mmcfg_reserved_t); #define XEN_PCI_DEV_EXTFN 0x1 #define XEN_PCI_DEV_VIRTFN 0x2 #define XEN_PCI_DEV_PXM 0x4 #define PHYSDEVOP_pci_device_add 25 struct physdev_pci_device_add { /* IN */ uint16_t seg; uint8_t bus; uint8_t devfn; uint32_t flags; struct { uint8_t bus; uint8_t devfn; } physfn; #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L uint32_t optarr[]; #elif defined(__GNUC__) uint32_t optarr[0]; #endif }; typedef struct physdev_pci_device_add physdev_pci_device_add_t; DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_add_t); #define PHYSDEVOP_pci_device_remove 26 #define PHYSDEVOP_restore_msi_ext 27 /* * Dom0 should use these two to announce MMIO resources assigned to * MSI-X capable devices won't (prepare) or may (release) change. */ #define PHYSDEVOP_prepare_msix 30 #define PHYSDEVOP_release_msix 31 struct physdev_pci_device { /* IN */ uint16_t seg; uint8_t bus; uint8_t devfn; }; typedef struct physdev_pci_device physdev_pci_device_t; DEFINE_XEN_GUEST_HANDLE(physdev_pci_device_t); #define PHYSDEVOP_DBGP_RESET_PREPARE 1 #define PHYSDEVOP_DBGP_RESET_DONE 2 #define PHYSDEVOP_DBGP_BUS_UNKNOWN 0 #define PHYSDEVOP_DBGP_BUS_PCI 1 #define PHYSDEVOP_dbgp_op 29 struct physdev_dbgp_op { /* IN */ uint8_t op; uint8_t bus; union { struct physdev_pci_device pci; } u; }; typedef struct physdev_dbgp_op physdev_dbgp_op_t; DEFINE_XEN_GUEST_HANDLE(physdev_dbgp_op_t); /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. 
*/ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #if __XEN_INTERFACE_VERSION__ < 0x00040200 #define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v1 #else #define PHYSDEVOP_pirq_eoi_gmfn PHYSDEVOP_pirq_eoi_gmfn_v2 #endif #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/platform.h000066400000000000000000000452351314037446600323640ustar00rootroot00000000000000/****************************************************************************** * platform.h * * Hardware platform operations. Intended for use by domain-0 kernel. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_PLATFORM_H__ #define __XEN_PUBLIC_PLATFORM_H__ #include "xen.h" #define XENPF_INTERFACE_VERSION 0x03000001 /* * Set clock such that it would read after 00:00:00 UTC, * 1 January, 1970 if the current system time was . */ #define XENPF_settime 17 struct xenpf_settime { /* IN variables. */ uint32_t secs; uint32_t nsecs; uint64_t system_time; }; typedef struct xenpf_settime xenpf_settime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t); /* * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type. * On x86, @type is an architecture-defined MTRR memory type. * On success, returns the MTRR that was used (@reg) and a handle that can * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting. * (x86-specific). */ #define XENPF_add_memtype 31 struct xenpf_add_memtype { /* IN variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; /* OUT variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_add_memtype xenpf_add_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t); /* * Tear down an existing memory-range type. If @handle is remembered then it * should be passed in to accurately tear down the correct setting (in case * of overlapping memory regions with differing types). If it is not known * then @handle should be set to zero. In all cases @reg must be set. * (x86-specific). 
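 *
 * (Both MTRR operations are sub-commands of HYPERVISOR_platform_op(),
 *  issued by dom0.  Illustrative sketch only, assuming that wrapper and
 *  caller-supplied base_mfn/nr/type values, none of which are defined by
 *  this header:
 *
 *      struct xen_platform_op op = {
 *          .cmd = XENPF_add_memtype,
 *          .interface_version = XENPF_INTERFACE_VERSION,
 *      };
 *      op.u.add_memtype.mfn     = base_mfn;
 *      op.u.add_memtype.nr_mfns = nr;
 *      op.u.add_memtype.type    = type;
 *      rc = HYPERVISOR_platform_op(&op);
 *      // on success, op.u.add_memtype.handle and .reg identify the new
 *      // setting and can later be passed to XENPF_del_memtype below.)
 *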
*/ #define XENPF_del_memtype 32 struct xenpf_del_memtype { /* IN variables. */ uint32_t handle; uint32_t reg; }; typedef struct xenpf_del_memtype xenpf_del_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t); /* Read current type of an MTRR (x86-specific). */ #define XENPF_read_memtype 33 struct xenpf_read_memtype { /* IN variables. */ uint32_t reg; /* OUT variables. */ xen_pfn_t mfn; uint64_t nr_mfns; uint32_t type; }; typedef struct xenpf_read_memtype xenpf_read_memtype_t; DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t); #define XENPF_microcode_update 35 struct xenpf_microcode_update { /* IN variables. */ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */ uint32_t length; /* Length of microcode data. */ }; typedef struct xenpf_microcode_update xenpf_microcode_update_t; DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t); #define XENPF_platform_quirk 39 #define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */ #define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */ #define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */ struct xenpf_platform_quirk { /* IN variables. */ uint32_t quirk_id; }; typedef struct xenpf_platform_quirk xenpf_platform_quirk_t; DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t); #define XENPF_efi_runtime_call 49 #define XEN_EFI_get_time 1 #define XEN_EFI_set_time 2 #define XEN_EFI_get_wakeup_time 3 #define XEN_EFI_set_wakeup_time 4 #define XEN_EFI_get_next_high_monotonic_count 5 #define XEN_EFI_get_variable 6 #define XEN_EFI_set_variable 7 #define XEN_EFI_get_next_variable_name 8 #define XEN_EFI_query_variable_info 9 #define XEN_EFI_query_capsule_capabilities 10 #define XEN_EFI_update_capsule 11 struct xenpf_efi_runtime_call { uint32_t function; /* * This field is generally used for per sub-function flags (defined * below), except for the XEN_EFI_get_next_high_monotonic_count case, * where it holds the single returned value. 
*/ uint32_t misc; unsigned long status; union { #define XEN_EFI_GET_TIME_SET_CLEARS_NS 0x00000001 struct { struct xenpf_efi_time { uint16_t year; uint8_t month; uint8_t day; uint8_t hour; uint8_t min; uint8_t sec; uint32_t ns; int16_t tz; uint8_t daylight; } time; uint32_t resolution; uint32_t accuracy; } get_time; struct xenpf_efi_time set_time; #define XEN_EFI_GET_WAKEUP_TIME_ENABLED 0x00000001 #define XEN_EFI_GET_WAKEUP_TIME_PENDING 0x00000002 struct xenpf_efi_time get_wakeup_time; #define XEN_EFI_SET_WAKEUP_TIME_ENABLE 0x00000001 #define XEN_EFI_SET_WAKEUP_TIME_ENABLE_ONLY 0x00000002 struct xenpf_efi_time set_wakeup_time; #define XEN_EFI_VARIABLE_NON_VOLATILE 0x00000001 #define XEN_EFI_VARIABLE_BOOTSERVICE_ACCESS 0x00000002 #define XEN_EFI_VARIABLE_RUNTIME_ACCESS 0x00000004 struct { XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */ unsigned long size; XEN_GUEST_HANDLE(void) data; struct xenpf_efi_guid { uint32_t data1; uint16_t data2; uint16_t data3; uint8_t data4[8]; } vendor_guid; } get_variable, set_variable; struct { unsigned long size; XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */ struct xenpf_efi_guid vendor_guid; } get_next_variable_name; #define XEN_EFI_VARINFO_BOOT_SNAPSHOT 0x00000001 struct { uint32_t attr; uint64_t max_store_size; uint64_t remain_store_size; uint64_t max_size; } query_variable_info; struct { XEN_GUEST_HANDLE(void) capsule_header_array; unsigned long capsule_count; uint64_t max_capsule_size; unsigned int reset_type; } query_capsule_capabilities; struct { XEN_GUEST_HANDLE(void) capsule_header_array; unsigned long capsule_count; uint64_t sg_list; /* machine address */ } update_capsule; } u; }; typedef struct xenpf_efi_runtime_call xenpf_efi_runtime_call_t; DEFINE_XEN_GUEST_HANDLE(xenpf_efi_runtime_call_t); #define XENPF_firmware_info 50 #define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */ #define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */ #define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */ #define XEN_FW_EFI_INFO 4 /* from EFI */ #define XEN_FW_EFI_VERSION 0 #define XEN_FW_EFI_CONFIG_TABLE 1 #define XEN_FW_EFI_VENDOR 2 #define XEN_FW_EFI_MEM_INFO 3 #define XEN_FW_EFI_RT_VERSION 4 #define XEN_FW_EFI_PCI_ROM 5 #define XEN_FW_KBD_SHIFT_FLAGS 5 /* Int16, Fn02: Get keyboard shift flags. */ struct xenpf_firmware_info { /* IN variables. */ uint32_t type; uint32_t index; /* OUT variables. */ union { struct { /* Int13, Fn48: Check Extensions Present. */ uint8_t device; /* %dl: bios device number */ uint8_t version; /* %ah: major version */ uint16_t interface_support; /* %cx: support bitmap */ /* Int13, Fn08: Legacy Get Device Parameters. */ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */ uint8_t legacy_max_head; /* %dh: max head # */ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */ /* NB. First uint16_t of buffer must be set to buffer size. */ XEN_GUEST_HANDLE(void) edd_params; } disk_info; /* XEN_FW_DISK_INFO */ struct { uint8_t device; /* bios device number */ uint32_t mbr_signature; /* offset 0x1b8 in mbr */ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */ struct { /* Int10, AX=4F15: Get EDID info. 
*/ uint8_t capabilities; uint8_t edid_transfer_time; /* must refer to 128-byte buffer */ XEN_GUEST_HANDLE(uint8) edid; } vbeddc_info; /* XEN_FW_VBEDDC_INFO */ union xenpf_efi_info { uint32_t version; struct { uint64_t addr; /* EFI_CONFIGURATION_TABLE */ uint32_t nent; } cfg; struct { uint32_t revision; uint32_t bufsz; /* input, in bytes */ XEN_GUEST_HANDLE(void) name; /* UCS-2/UTF-16 string */ } vendor; struct { uint64_t addr; uint64_t size; uint64_t attr; uint32_t type; } mem; struct { /* IN variables */ uint16_t segment; uint8_t bus; uint8_t devfn; uint16_t vendor; uint16_t devid; /* OUT variables */ uint64_t address; xen_ulong_t size; } pci_rom; } efi_info; /* XEN_FW_EFI_INFO */ /* Int16, Fn02: Get keyboard shift flags. */ uint8_t kbd_shift_flags; /* XEN_FW_KBD_SHIFT_FLAGS */ } u; }; typedef struct xenpf_firmware_info xenpf_firmware_info_t; DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t); #define XENPF_enter_acpi_sleep 51 struct xenpf_enter_acpi_sleep { /* IN variables */ #if !defined(CONFIG_PARAVIRT_XEN) && __XEN_INTERFACE_VERSION__ < 0x00040300 uint16_t pm1a_cnt_val; /* PM1a control value. */ uint16_t pm1b_cnt_val; /* PM1b control value. */ #else uint16_t val_a; /* PM1a control / sleep type A. */ uint16_t val_b; /* PM1b control / sleep type B. */ #endif uint32_t sleep_state; /* Which state to enter (Sn). */ #define XENPF_ACPI_SLEEP_EXTENDED 0x00000001 uint32_t flags; /* XENPF_ACPI_SLEEP_*. */ }; typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t; DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t); #define XENPF_change_freq 52 struct xenpf_change_freq { /* IN variables */ uint32_t flags; /* Must be zero. */ uint32_t cpu; /* Physical cpu. */ uint64_t freq; /* New frequency (Hz). */ }; typedef struct xenpf_change_freq xenpf_change_freq_t; DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t); /* * Get idle times (nanoseconds since boot) for physical CPUs specified in the * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is * indexed by CPU number; only entries with the corresponding @cpumap_bitmap * bit set are written to. On return, @cpumap_bitmap is modified so that any * non-existent CPUs are cleared. Such CPUs have their @idletime array entry * cleared. */ #define XENPF_getidletime 53 struct xenpf_getidletime { /* IN/OUT variables */ /* IN: CPUs to interrogate; OUT: subset of IN which are present */ XEN_GUEST_HANDLE(uint8) cpumap_bitmap; /* IN variables */ /* Size of cpumap bitmap. */ uint32_t cpumap_nr_cpus; /* Must be indexable for every cpu in cpumap_bitmap. */ XEN_GUEST_HANDLE(uint64) idletime; /* OUT variables */ /* System time when the idletime snapshots were taken. 
*/ uint64_t now; }; typedef struct xenpf_getidletime xenpf_getidletime_t; DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t); #define XENPF_set_processor_pminfo 54 /* ability bits */ #define XEN_PROCESSOR_PM_CX 1 #define XEN_PROCESSOR_PM_PX 2 #define XEN_PROCESSOR_PM_TX 4 /* cmd type */ #define XEN_PM_CX 0 #define XEN_PM_PX 1 #define XEN_PM_TX 2 #define XEN_PM_PDC 3 /* Px sub info type */ #define XEN_PX_PCT 1 #define XEN_PX_PSS 2 #define XEN_PX_PPC 4 #define XEN_PX_PSD 8 struct xen_power_register { uint32_t space_id; uint32_t bit_width; uint32_t bit_offset; uint32_t access_size; uint64_t address; }; struct xen_processor_csd { uint32_t domain; /* domain number of one dependent group */ uint32_t coord_type; /* coordination type */ uint32_t num; /* number of processors in same domain */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_processor_csd); typedef struct xen_processor_csd xen_processor_csd_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t); struct xen_processor_cx { struct xen_power_register reg; /* GAS for Cx trigger register */ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */ uint32_t power; /* average power consumption(mW) */ uint32_t dpcnt; /* number of dependency entries */ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_processor_cx); typedef struct xen_processor_cx xen_processor_cx_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t); struct xen_processor_flags { uint32_t bm_control:1; uint32_t bm_check:1; uint32_t has_cst:1; uint32_t power_setup_done:1; uint32_t bm_rld_set:1; }; struct xen_processor_power { uint32_t count; /* number of C state entries in array below */ struct xen_processor_flags flags; /* global flags of this processor */ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */ }; struct xen_pct_register { uint8_t descriptor; uint16_t length; uint8_t space_id; uint8_t bit_width; uint8_t bit_offset; uint8_t reserved; uint64_t address; }; struct xen_processor_px { uint64_t core_frequency; /* megahertz */ uint64_t power; /* milliWatts */ uint64_t transition_latency; /* microseconds */ uint64_t bus_master_latency; /* microseconds */ uint64_t control; /* control value */ uint64_t status; /* success indicator */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_processor_px); typedef struct xen_processor_px xen_processor_px_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t); struct xen_psd_package { uint64_t num_entries; uint64_t revision; uint64_t domain; uint64_t coord_type; uint64_t num_processors; }; struct xen_processor_performance { uint32_t flags; /* flag for Px sub info type */ uint32_t platform_limit; /* Platform limitation on freq usage */ struct xen_pct_register control_register; struct xen_pct_register status_register; uint32_t state_count; /* total available performance states */ XEN_GUEST_HANDLE(xen_processor_px_t) states; struct xen_psd_package domain_info; uint32_t shared_type; /* coordination type of this processor */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_processor_performance); typedef struct xen_processor_performance xen_processor_performance_t; DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t); struct xenpf_set_processor_pminfo { /* IN variables */ uint32_t id; /* ACPI CPU ID */ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */ union { struct xen_processor_power power;/* Cx: _CST/_CSD */ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */ XEN_GUEST_HANDLE(uint32) pdc; /* _PDC */ #ifdef CONFIG_XEN } u; #else }; #endif }; 
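/*
 * Illustrative sketch only: dom0's ACPI processor driver typically uploads
 * P-state data roughly as follows, assuming the HYPERVISOR_platform_op()
 * wrapper, a caller-built xen_processor_px array 'px_array' of 'nr_states'
 * entries, and the CONFIG_XEN variant above in which the inner union is
 * named 'u' (none of which are defined by this header):
 *
 *     struct xen_platform_op op = {
 *         .cmd = XENPF_set_processor_pminfo,
 *         .interface_version = XENPF_INTERFACE_VERSION,
 *     };
 *     op.u.set_pminfo.id   = acpi_id;
 *     op.u.set_pminfo.type = XEN_PM_PX;
 *     op.u.set_pminfo.u.perf.flags = XEN_PX_PCT | XEN_PX_PSS | XEN_PX_PSD;
 *     op.u.set_pminfo.u.perf.state_count = nr_states;
 *     set_xen_guest_handle(op.u.set_pminfo.u.perf.states, px_array);
 *     rc = HYPERVISOR_platform_op(&op);
 */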
DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo); typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t); #define XENPF_get_cpuinfo 55 struct xenpf_pcpuinfo { /* IN */ uint32_t xen_cpuid; /* OUT */ /* The maxium cpu_id that is present */ uint32_t max_present; #define XEN_PCPU_FLAGS_ONLINE 1 /* Correponding xen_cpuid is not present*/ #define XEN_PCPU_FLAGS_INVALID 2 uint32_t flags; uint32_t apic_id; uint32_t acpi_id; }; typedef struct xenpf_pcpuinfo xenpf_pcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xenpf_pcpuinfo_t); #define XENPF_get_cpu_version 48 struct xenpf_pcpu_version { /* IN */ uint32_t xen_cpuid; /* OUT */ /* The maxium cpu_id that is present */ uint32_t max_present; char vendor_id[12]; uint32_t family; uint32_t model; uint32_t stepping; }; typedef struct xenpf_pcpu_version xenpf_pcpu_version_t; DEFINE_XEN_GUEST_HANDLE(xenpf_pcpu_version_t); #define XENPF_cpu_online 56 #define XENPF_cpu_offline 57 struct xenpf_cpu_ol { uint32_t cpuid; }; DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol); typedef struct xenpf_cpu_ol xenpf_cpu_ol_t; DEFINE_XEN_GUEST_HANDLE(xenpf_cpu_ol_t); #define XENPF_cpu_hotadd 58 struct xenpf_cpu_hotadd { uint32_t apic_id; uint32_t acpi_id; uint32_t pxm; }; #define XENPF_mem_hotadd 59 struct xenpf_mem_hotadd { uint64_t spfn; uint64_t epfn; uint32_t pxm; uint32_t flags; }; #define XENPF_core_parking 60 struct xenpf_core_parking { /* IN variables */ #define XEN_CORE_PARKING_SET 1 #define XEN_CORE_PARKING_GET 2 uint32_t type; /* IN variables: set cpu nums expected to be idled */ /* OUT variables: get cpu nums actually be idled */ uint32_t idle_nums; }; DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking); typedef struct xenpf_core_parking xenpf_core_parking_t; DEFINE_XEN_GUEST_HANDLE(xenpf_core_parking_t); #define XENPF_get_cpu_freq ('N' << 24) #define XENPF_get_cpu_freq_min (XENPF_get_cpu_freq + 1) #define XENPF_get_cpu_freq_max (XENPF_get_cpu_freq_min + 1) struct xenpf_get_cpu_freq { /* IN variables */ uint32_t vcpu; /* OUT variables */ uint32_t freq; /* in kHz */ }; /* * ` enum neg_errnoval * ` HYPERVISOR_platform_op(const struct xen_platform_op*); */ struct xen_platform_op { uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { struct xenpf_settime settime; struct xenpf_add_memtype add_memtype; struct xenpf_del_memtype del_memtype; struct xenpf_read_memtype read_memtype; struct xenpf_microcode_update microcode; struct xenpf_platform_quirk platform_quirk; struct xenpf_efi_runtime_call efi_runtime_call; struct xenpf_firmware_info firmware_info; struct xenpf_enter_acpi_sleep enter_acpi_sleep; struct xenpf_change_freq change_freq; struct xenpf_getidletime getidletime; struct xenpf_set_processor_pminfo set_pminfo; struct xenpf_pcpuinfo pcpu_info; struct xenpf_pcpu_version pcpu_version; struct xenpf_cpu_ol cpu_ol; struct xenpf_cpu_hotadd cpu_add; struct xenpf_mem_hotadd mem_add; struct xenpf_core_parking core_parking; struct xenpf_get_cpu_freq get_cpu_freq; uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t); #endif /* __XEN_PUBLIC_PLATFORM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/sched.h000066400000000000000000000141141314037446600316160ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Permission is hereby granted, free of charge, to any person obtaining a copy * 
of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * `incontents 150 sched Guest Scheduler Operations * * The SCHEDOP interface provides mechanisms for a guest to interact * with the scheduler, including yield, blocking and shutting itself * down. */ /* * The prototype for this hypercall is: * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...) * * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * ... == Additional Operation-specific extra arguments, described below. * * Versions of Xen prior to 3.0.2 provide only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) * * This legacy version is available to new guests as: * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg) */ /* ` enum sched_op { // SCHEDOP_* => struct sched_* */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown_t structure. * * If the sched_shutdown_t reason is SHUTDOWN_suspend then this * hypercall takes an additional extra argument which should be the * MFN of the guest's start_info_t. * * In addition, which reason is SHUTDOWN_suspend this hypercall * returns 1 if suspend was cancelled or the domain was merely * checkpointed, and 0 if it is resuming in a new domain. */ #define SCHEDOP_shutdown 2 /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll_t structure. */ #define SCHEDOP_poll 3 /* * Declare a shutdown for another domain. The main use of this function is * in interpreting shutdown requests and reasons for fully-virtualized * domains. A para-virtualized domain may use SCHEDOP_shutdown directly. * @arg == pointer to sched_remote_shutdown_t structure. 
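 *
 * (The poll operation above is how a PV guest waits on a small set of event
 *  channels -- e.g. the console or xenstore ports -- before full event
 *  delivery is configured.  Illustrative sketch, assuming the guest's
 *  HYPERVISOR_sched_op() wrapper and a previously bound evtchn_port_t
 *  'port', neither of which is defined by this header:
 *
 *      struct sched_poll poll = {
 *          .nr_ports = 1,
 *          .timeout  = 0,            // 0 => no timeout
 *      };
 *      set_xen_guest_handle(poll.ports, &port);
 *      rc = HYPERVISOR_sched_op(SCHEDOP_poll, &poll);)
 *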
*/ #define SCHEDOP_remote_shutdown 4 /* * Latch a shutdown code, so that when the domain later shuts down it * reports this code to the control tools. * @arg == sched_shutdown_t, as for SCHEDOP_shutdown. */ #define SCHEDOP_shutdown_code 5 /* * Setup, poke and destroy a domain watchdog timer. * @arg == pointer to sched_watchdog_t structure. * With id == 0, setup a domain watchdog timer to cause domain shutdown * after timeout, returns watchdog id. * With id != 0 and timeout == 0, destroy domain watchdog timer. * With id != 0 and timeout != 0, poke watchdog timer and set new timeout. */ #define SCHEDOP_watchdog 6 /* ` } */ struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */ }; DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); typedef struct sched_shutdown sched_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t); struct sched_poll { XEN_GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; DEFINE_GUEST_HANDLE_STRUCT(sched_poll); typedef struct sched_poll sched_poll_t; DEFINE_XEN_GUEST_HANDLE(sched_poll_t); struct sched_remote_shutdown { domid_t domain_id; /* Remote domain ID */ unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */ }; typedef struct sched_remote_shutdown sched_remote_shutdown_t; DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t); struct sched_watchdog { uint32_t id; /* watchdog ID */ uint32_t timeout; /* timeout */ }; typedef struct sched_watchdog sched_watchdog_t; DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ /* ` enum sched_shutdown_reason { */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ #define SHUTDOWN_MAX 4 /* Maximum valid shutdown reason. */ /* ` } */ #endif /* __XEN_PUBLIC_SCHED_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/sysctl.h000066400000000000000000000614161314037446600320600ustar00rootroot00000000000000/****************************************************************************** * sysctl.h * * System management operations. For use by node control stack. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2002-2006, K Fraser */ #ifndef __XEN_PUBLIC_SYSCTL_H__ #define __XEN_PUBLIC_SYSCTL_H__ #if !defined(__XEN__) && !defined(__XEN_TOOLS__) #error "sysctl operations are intended for use by node control tools only" #endif #include "xen.h" #include "domctl.h" #define XEN_SYSCTL_INTERFACE_VERSION 0x0000000A /* * Read console content from Xen buffer ring. */ /* XEN_SYSCTL_readconsole */ struct xen_sysctl_readconsole { /* IN: Non-zero -> clear after reading. */ uint8_t clear; /* IN: Non-zero -> start index specified by @index field. */ uint8_t incremental; uint8_t pad0, pad1; /* * IN: Start index for consuming from ring buffer (if @incremental); * OUT: End index after consuming from ring buffer. */ uint32_t index; /* IN: Virtual address to write console data. */ XEN_GUEST_HANDLE_64(char) buffer; /* IN: Size of buffer; OUT: Bytes written to buffer. */ uint32_t count; }; typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t); /* Get trace buffers machine base address */ /* XEN_SYSCTL_tbuf_op */ struct xen_sysctl_tbuf_op { /* IN variables */ #define XEN_SYSCTL_TBUFOP_get_info 0 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2 #define XEN_SYSCTL_TBUFOP_set_size 3 #define XEN_SYSCTL_TBUFOP_enable 4 #define XEN_SYSCTL_TBUFOP_disable 5 uint32_t cmd; /* IN/OUT variables */ struct xenctl_bitmap cpu_mask; uint32_t evt_mask; /* OUT variables */ uint64_aligned_t buffer_mfn; uint32_t size; /* Also an IN variable! */ }; typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t); /* * Get physical information about the host machine */ /* XEN_SYSCTL_physinfo */ /* (x86) The platform supports HVM guests. */ #define _XEN_SYSCTL_PHYSCAP_hvm 0 #define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm) /* (x86) The platform supports HVM-guest direct access to I/O devices. */ #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1 #define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio) struct xen_sysctl_physinfo { uint32_t threads_per_core; uint32_t cores_per_socket; uint32_t nr_cpus; /* # CPUs currently online */ uint32_t max_cpu_id; /* Largest possible CPU ID on this host */ uint32_t nr_nodes; /* # nodes currently online */ uint32_t max_node_id; /* Largest possible node ID on this host */ uint32_t cpu_khz; uint64_aligned_t total_pages; uint64_aligned_t free_pages; uint64_aligned_t scrub_pages; uint64_aligned_t outstanding_pages; uint32_t hw_cap[8]; /* XEN_SYSCTL_PHYSCAP_??? */ uint32_t capabilities; }; typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t); /* * Get the ID of the current scheduler. */ /* XEN_SYSCTL_sched_id */ struct xen_sysctl_sched_id { /* OUT variable */ uint32_t sched_id; }; typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t); /* Interface for controlling Xen software performance counters. */ /* XEN_SYSCTL_perfc_op */ /* Sub-operations: */ #define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */ #define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. 
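 *
 * (sysctl operations are issued by the toolstack rather than by ordinary
 *  guests; libxc's xc_physinfo(), for example, wraps the physinfo query
 *  above.  Illustrative sketch of the payload, with 'do_sysctl()' standing
 *  in for whatever privileged hypercall wrapper the caller uses:
 *
 *      struct xen_sysctl op = {
 *          .cmd = XEN_SYSCTL_physinfo,
 *          .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
 *      };
 *      rc = do_sysctl(&op);
 *      // on success, op.u.physinfo.nr_cpus, .total_pages, .capabilities
 *      // etc. describe the host.)
 *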
*/ struct xen_sysctl_perfc_desc { char name[80]; /* name of perf counter */ uint32_t nr_vals; /* number of values for this counter */ }; typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t); typedef uint32_t xen_sysctl_perfc_val_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t); struct xen_sysctl_perfc_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */ /* OUT variables. */ uint32_t nr_counters; /* number of counters description */ uint32_t nr_vals; /* number of values */ /* counter information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc; /* counter values (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val; }; typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); /* XEN_SYSCTL_getdomaininfolist */ struct xen_sysctl_getdomaininfolist { /* IN variables. */ domid_t first_domain; uint32_t max_domains; XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer; /* OUT variables. */ uint32_t num_domains; }; typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t); /* Inject debug keys into Xen. */ /* XEN_SYSCTL_debug_keys */ struct xen_sysctl_debug_keys { /* IN variables. */ XEN_GUEST_HANDLE_64(char) keys; uint32_t nr_keys; }; typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t); /* Get physical CPU information. */ /* XEN_SYSCTL_getcpuinfo */ struct xen_sysctl_cpuinfo { uint64_aligned_t idletime; }; typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t); struct xen_sysctl_getcpuinfo { /* IN variables. */ uint32_t max_cpus; XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info; /* OUT variables. */ uint32_t nr_cpus; }; typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t); /* XEN_SYSCTL_availheap */ struct xen_sysctl_availheap { /* IN variables. */ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */ int32_t node; /* NUMA node of interest (-1 for all nodes). */ /* OUT variables. */ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. 
*/ }; typedef struct xen_sysctl_availheap xen_sysctl_availheap_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t); /* XEN_SYSCTL_get_pmstat */ struct pm_px_val { uint64_aligned_t freq; /* Px core frequency */ uint64_aligned_t residency; /* Px residency time */ uint64_aligned_t count; /* Px transition count */ }; typedef struct pm_px_val pm_px_val_t; DEFINE_XEN_GUEST_HANDLE(pm_px_val_t); struct pm_px_stat { uint8_t total; /* total Px states */ uint8_t usable; /* usable Px states */ uint8_t last; /* last Px state */ uint8_t cur; /* current Px state */ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */ XEN_GUEST_HANDLE_64(pm_px_val_t) pt; }; typedef struct pm_px_stat pm_px_stat_t; DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t); struct pm_cx_stat { uint32_t nr; /* entry nr in triggers & residencies, including C0 */ uint32_t last; /* last Cx state */ uint64_aligned_t idle_time; /* idle time from boot */ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */ uint64_aligned_t pc2; uint64_aligned_t pc3; uint64_aligned_t pc6; uint64_aligned_t pc7; uint64_aligned_t cc3; uint64_aligned_t cc6; uint64_aligned_t cc7; }; struct xen_sysctl_get_pmstat { #define PMSTAT_CATEGORY_MASK 0xf0 #define PMSTAT_PX 0x10 #define PMSTAT_CX 0x20 #define PMSTAT_get_max_px (PMSTAT_PX | 0x1) #define PMSTAT_get_pxstat (PMSTAT_PX | 0x2) #define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3) #define PMSTAT_get_max_cx (PMSTAT_CX | 0x1) #define PMSTAT_get_cxstat (PMSTAT_CX | 0x2) #define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3) uint32_t type; uint32_t cpuid; union { struct pm_px_stat getpx; struct pm_cx_stat getcx; /* other struct for tx, etc */ } u; }; typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t); /* XEN_SYSCTL_cpu_hotplug */ struct xen_sysctl_cpu_hotplug { /* IN variables */ uint32_t cpu; /* Physical cpu. */ #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1 uint32_t op; /* hotplug opcode */ }; typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t); /* * Get/set xen power management, include * 1. 
cpufreq governors and related parameters */ /* XEN_SYSCTL_pm_op */ struct xen_userspace { uint32_t scaling_setspeed; }; typedef struct xen_userspace xen_userspace_t; struct xen_ondemand { uint32_t sampling_rate_max; uint32_t sampling_rate_min; uint32_t sampling_rate; uint32_t up_threshold; }; typedef struct xen_ondemand xen_ondemand_t; /* * cpufreq para name of this structure named * same as sysfs file name of native linux */ #define CPUFREQ_NAME_LEN 16 struct xen_get_cpufreq_para { /* IN/OUT variable */ uint32_t cpu_num; uint32_t freq_num; uint32_t gov_num; /* for all governors */ /* OUT variable */ XEN_GUEST_HANDLE_64(uint32) affected_cpus; XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies; XEN_GUEST_HANDLE_64(char) scaling_available_governors; char scaling_driver[CPUFREQ_NAME_LEN]; uint32_t cpuinfo_cur_freq; uint32_t cpuinfo_max_freq; uint32_t cpuinfo_min_freq; uint32_t scaling_cur_freq; char scaling_governor[CPUFREQ_NAME_LEN]; uint32_t scaling_max_freq; uint32_t scaling_min_freq; /* for specific governor */ union { struct xen_userspace userspace; struct xen_ondemand ondemand; } u; int32_t turbo_enabled; }; struct xen_set_cpufreq_gov { char scaling_governor[CPUFREQ_NAME_LEN]; }; struct xen_set_cpufreq_para { #define SCALING_MAX_FREQ 1 #define SCALING_MIN_FREQ 2 #define SCALING_SETSPEED 3 #define SAMPLING_RATE 4 #define UP_THRESHOLD 5 uint32_t ctrl_type; uint32_t ctrl_value; }; struct xen_sysctl_pm_op { #define PM_PARA_CATEGORY_MASK 0xf0 #define CPUFREQ_PARA 0x10 /* cpufreq command type */ #define GET_CPUFREQ_PARA (CPUFREQ_PARA | 0x01) #define SET_CPUFREQ_GOV (CPUFREQ_PARA | 0x02) #define SET_CPUFREQ_PARA (CPUFREQ_PARA | 0x03) #define GET_CPUFREQ_AVGFREQ (CPUFREQ_PARA | 0x04) /* set/reset scheduler power saving option */ #define XEN_SYSCTL_pm_op_set_sched_opt_smt 0x21 /* cpuidle max_cstate access command */ #define XEN_SYSCTL_pm_op_get_max_cstate 0x22 #define XEN_SYSCTL_pm_op_set_max_cstate 0x23 /* set scheduler migration cost value */ #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay 0x24 #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay 0x25 /* enable/disable turbo mode when in dbs governor */ #define XEN_SYSCTL_pm_op_enable_turbo 0x26 #define XEN_SYSCTL_pm_op_disable_turbo 0x27 uint32_t cmd; uint32_t cpuid; union { struct xen_get_cpufreq_para get_para; struct xen_set_cpufreq_gov set_gov; struct xen_set_cpufreq_para set_para; uint64_aligned_t get_avgfreq; uint32_t set_sched_opt_smt; uint32_t get_max_cstate; uint32_t set_max_cstate; uint32_t get_vcpu_migration_delay; uint32_t set_vcpu_migration_delay; } u; }; /* XEN_SYSCTL_page_offline_op */ struct xen_sysctl_page_offline_op { /* IN: range of page to be offlined */ #define sysctl_page_offline 1 #define sysctl_page_online 2 #define sysctl_query_page_offline 3 uint32_t cmd; uint32_t start; uint32_t end; /* OUT: result of page offline request */ /* * bit 0~15: result flags * bit 16~31: owner */ XEN_GUEST_HANDLE(uint32) status; }; #define PG_OFFLINE_STATUS_MASK (0xFFUL) /* The result is invalid, i.e. 
HV does not handle it */ #define PG_OFFLINE_INVALID (0x1UL << 0) #define PG_OFFLINE_OFFLINED (0x1UL << 1) #define PG_OFFLINE_PENDING (0x1UL << 2) #define PG_OFFLINE_FAILED (0x1UL << 3) #define PG_OFFLINE_AGAIN (0x1UL << 4) #define PG_ONLINE_FAILED PG_OFFLINE_FAILED #define PG_ONLINE_ONLINED PG_OFFLINE_OFFLINED #define PG_OFFLINE_STATUS_OFFLINED (0x1UL << 1) #define PG_OFFLINE_STATUS_ONLINE (0x1UL << 2) #define PG_OFFLINE_STATUS_OFFLINE_PENDING (0x1UL << 3) #define PG_OFFLINE_STATUS_BROKEN (0x1UL << 4) #define PG_OFFLINE_MISC_MASK (0xFFUL << 4) /* valid when PG_OFFLINE_FAILED or PG_OFFLINE_PENDING */ #define PG_OFFLINE_XENPAGE (0x1UL << 8) #define PG_OFFLINE_DOM0PAGE (0x1UL << 9) #define PG_OFFLINE_ANONYMOUS (0x1UL << 10) #define PG_OFFLINE_NOT_CONV_RAM (0x1UL << 11) #define PG_OFFLINE_OWNED (0x1UL << 12) #define PG_OFFLINE_BROKEN (0x1UL << 13) #define PG_ONLINE_BROKEN PG_OFFLINE_BROKEN #define PG_OFFLINE_OWNER_SHIFT 16 /* XEN_SYSCTL_lockprof_op */ /* Sub-operations: */ #define XEN_SYSCTL_LOCKPROF_reset 1 /* Reset all profile data to zero. */ #define XEN_SYSCTL_LOCKPROF_query 2 /* Get lock profile information. */ /* Record-type: */ #define LOCKPROF_TYPE_GLOBAL 0 /* global lock, idx meaningless */ #define LOCKPROF_TYPE_PERDOM 1 /* per-domain lock, idx is domid */ #define LOCKPROF_TYPE_N 2 /* number of types */ struct xen_sysctl_lockprof_data { char name[40]; /* lock name (may include up to 2 %d specifiers) */ int32_t type; /* LOCKPROF_TYPE_??? */ int32_t idx; /* index (e.g. domain id) */ uint64_aligned_t lock_cnt; /* # of locking succeeded */ uint64_aligned_t block_cnt; /* # of wait for lock */ uint64_aligned_t lock_time; /* nsecs lock held */ uint64_aligned_t block_time; /* nsecs waited for lock */ }; typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t); struct xen_sysctl_lockprof_op { /* IN variables. */ uint32_t cmd; /* XEN_SYSCTL_LOCKPROF_??? */ uint32_t max_elem; /* size of output buffer */ /* OUT variables (query only). */ uint32_t nr_elem; /* number of elements available */ uint64_aligned_t time; /* nsecs of profile measurement */ /* profile information (or NULL) */ XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data; }; typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t); /* XEN_SYSCTL_topologyinfo */ #define INVALID_TOPOLOGY_ID (~0U) struct xen_sysctl_topologyinfo { /* * IN: maximum addressable entry in the caller-provided arrays. * OUT: largest cpu identifier in the system. * If OUT is greater than IN then the arrays are truncated! * If OUT is leass than IN then the array tails are not written by sysctl. */ uint32_t max_cpu_index; /* * If not NULL, these arrays are filled with core/socket/node identifier * for each cpu. * If a cpu has no core/socket/node information (e.g., cpu not present) * then the sentinel value ~0u is written to each array. * The number of array elements written by the sysctl is: * min(@max_cpu_index_IN,@max_cpu_index_OUT)+1 */ XEN_GUEST_HANDLE_64(uint32) cpu_to_core; XEN_GUEST_HANDLE_64(uint32) cpu_to_socket; XEN_GUEST_HANDLE_64(uint32) cpu_to_node; }; typedef struct xen_sysctl_topologyinfo xen_sysctl_topologyinfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_topologyinfo_t); /* XEN_SYSCTL_numainfo */ #define INVALID_NUMAINFO_ID (~0U) struct xen_sysctl_numainfo { /* * IN: maximum addressable entry in the caller-provided arrays. * OUT: largest node identifier in the system. * If OUT is greater than IN then the arrays are truncated! 
*/ uint32_t max_node_index; /* NB. Entries are 0 if node is not present. */ XEN_GUEST_HANDLE_64(uint64) node_to_memsize; XEN_GUEST_HANDLE_64(uint64) node_to_memfree; /* * Array, of size (max_node_index+1)^2, listing memory access distances * between nodes. If an entry has no node distance information (e.g., node * not present) then the value ~0u is written. * * Note that the array rows must be indexed by multiplying by the minimum * of the caller-provided max_node_index and the returned value of * max_node_index. That is, if the largest node index in the system is * smaller than the caller can handle, a smaller 2-d array is constructed * within the space provided by the caller. When this occurs, trailing * space provided by the caller is not modified. If the largest node index * in the system is larger than the caller can handle, then a 2-d array of * the maximum size handleable by the caller is constructed. */ XEN_GUEST_HANDLE_64(uint32) node_to_node_distance; }; typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t); /* XEN_SYSCTL_cpupool_op */ #define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */ #define XEN_SYSCTL_CPUPOOL_OP_DESTROY 2 /* D */ #define XEN_SYSCTL_CPUPOOL_OP_INFO 3 /* I */ #define XEN_SYSCTL_CPUPOOL_OP_ADDCPU 4 /* A */ #define XEN_SYSCTL_CPUPOOL_OP_RMCPU 5 /* R */ #define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN 6 /* M */ #define XEN_SYSCTL_CPUPOOL_OP_FREEINFO 7 /* F */ #define XEN_SYSCTL_CPUPOOL_PAR_ANY 0xFFFFFFFF struct xen_sysctl_cpupool_op { uint32_t op; /* IN */ uint32_t cpupool_id; /* IN: CDIARM OUT: CI */ uint32_t sched_id; /* IN: C OUT: I */ uint32_t domid; /* IN: M */ uint32_t cpu; /* IN: AR */ uint32_t n_dom; /* OUT: I */ struct xenctl_bitmap cpumap; /* OUT: IF */ }; typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t); #define ARINC653_MAX_DOMAINS_PER_SCHEDULE 64 /* * This structure is used to pass a new ARINC653 schedule from a * privileged domain (ie dom0) to Xen. */ struct xen_sysctl_arinc653_schedule { /* major_frame holds the time for the new schedule's major frame * in nanoseconds. */ uint64_aligned_t major_frame; /* num_sched_entries holds how many of the entries in the * sched_entries[] array are valid. */ uint8_t num_sched_entries; /* The sched_entries array holds the actual schedule entries. */ struct { /* dom_handle must match a domain's UUID */ xen_domain_handle_t dom_handle; /* If a domain has multiple VCPUs, vcpu_id specifies which one * this schedule entry applies to. It should be set to 0 if * there is only one VCPU for the domain. */ unsigned int vcpu_id; /* runtime specifies the amount of time that should be allocated * to this VCPU per major frame. It is specified in nanoseconds */ uint64_aligned_t runtime; } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE]; }; typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t); struct xen_sysctl_credit_schedule { /* Length of timeslice in milliseconds */ #define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000 #define XEN_SYSCTL_CSCHED_TSLICE_MIN 1 unsigned tslice_ms; /* Rate limit (minimum timeslice) in microseconds */ #define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000 #define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100 unsigned ratelimit_us; }; typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t); /* XEN_SYSCTL_scheduler_op */ /* Set or get info? 
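 *
 * An illustrative putinfo call (a sketch only, assuming a HYPERVISOR_sysctl()
 * wrapper and XEN_SCHEDULER_CREDIT from domctl.h): to retune the credit
 * scheduler of a cpupool, fill a struct xen_sysctl with
 * interface_version = XEN_SYSCTL_INTERFACE_VERSION,
 * cmd = XEN_SYSCTL_scheduler_op, then set u.scheduler_op.cpupool_id,
 * u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT,
 * u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo, and
 * u.scheduler_op.u.sched_credit.tslice_ms to a value within
 * [XEN_SYSCTL_CSCHED_TSLICE_MIN, XEN_SYSCTL_CSCHED_TSLICE_MAX].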
*/ #define XEN_SYSCTL_SCHEDOP_putinfo 0 #define XEN_SYSCTL_SCHEDOP_getinfo 1 struct xen_sysctl_scheduler_op { uint32_t cpupool_id; /* Cpupool whose scheduler is to be targetted. */ uint32_t sched_id; /* XEN_SCHEDULER_* (domctl.h) */ uint32_t cmd; /* XEN_SYSCTL_SCHEDOP_* */ union { struct xen_sysctl_sched_arinc653 { XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule; } sched_arinc653; struct xen_sysctl_credit_schedule sched_credit; } u; }; typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t); /* XEN_SYSCTL_coverage_op */ /* * Get total size of information, to help allocate * the buffer. The pointer points to a 32 bit value. */ #define XEN_SYSCTL_COVERAGE_get_total_size 0 /* * Read coverage information in a single run * You must use a tool to split them. */ #define XEN_SYSCTL_COVERAGE_read 1 /* * Reset all the coverage counters to 0 * No parameters. */ #define XEN_SYSCTL_COVERAGE_reset 2 /* * Like XEN_SYSCTL_COVERAGE_read but reset also * counters to 0 in a single call. */ #define XEN_SYSCTL_COVERAGE_read_and_reset 3 struct xen_sysctl_coverage_op { uint32_t cmd; /* XEN_SYSCTL_COVERAGE_* */ union { uint32_t total_size; /* OUT */ XEN_GUEST_HANDLE_64(uint8) raw_info; /* OUT */ } u; }; typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t); struct xen_sysctl { uint32_t cmd; #define XEN_SYSCTL_readconsole 1 #define XEN_SYSCTL_tbuf_op 2 #define XEN_SYSCTL_physinfo 3 #define XEN_SYSCTL_sched_id 4 #define XEN_SYSCTL_perfc_op 5 #define XEN_SYSCTL_getdomaininfolist 6 #define XEN_SYSCTL_debug_keys 7 #define XEN_SYSCTL_getcpuinfo 8 #define XEN_SYSCTL_availheap 9 #define XEN_SYSCTL_get_pmstat 10 #define XEN_SYSCTL_cpu_hotplug 11 #define XEN_SYSCTL_pm_op 12 #define XEN_SYSCTL_page_offline_op 14 #define XEN_SYSCTL_lockprof_op 15 #define XEN_SYSCTL_topologyinfo 16 #define XEN_SYSCTL_numainfo 17 #define XEN_SYSCTL_cpupool_op 18 #define XEN_SYSCTL_scheduler_op 19 #define XEN_SYSCTL_coverage_op 20 uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ union { struct xen_sysctl_readconsole readconsole; struct xen_sysctl_tbuf_op tbuf_op; struct xen_sysctl_physinfo physinfo; struct xen_sysctl_topologyinfo topologyinfo; struct xen_sysctl_numainfo numainfo; struct xen_sysctl_sched_id sched_id; struct xen_sysctl_perfc_op perfc_op; struct xen_sysctl_getdomaininfolist getdomaininfolist; struct xen_sysctl_debug_keys debug_keys; struct xen_sysctl_getcpuinfo getcpuinfo; struct xen_sysctl_availheap availheap; struct xen_sysctl_get_pmstat get_pmstat; struct xen_sysctl_cpu_hotplug cpu_hotplug; struct xen_sysctl_pm_op pm_op; struct xen_sysctl_page_offline_op page_offline; struct xen_sysctl_lockprof_op lockprof_op; struct xen_sysctl_cpupool_op cpupool_op; struct xen_sysctl_scheduler_op scheduler_op; struct xen_sysctl_coverage_op coverage_op; uint8_t pad[128]; } u; }; typedef struct xen_sysctl xen_sysctl_t; DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t); #endif /* __XEN_PUBLIC_SYSCTL_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/tmem.h000066400000000000000000000114061314037446600314730ustar00rootroot00000000000000/****************************************************************************** * tmem.h * * Guest OS interface to Xen Transcendent Memory. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_TMEM_H__ #define __XEN_PUBLIC_TMEM_H__ #include "xen.h" /* version of ABI */ #define TMEM_SPEC_VERSION 1 /* Commands to HYPERVISOR_tmem_op() */ #define TMEM_CONTROL 0 #define TMEM_NEW_POOL 1 #define TMEM_DESTROY_POOL 2 #define TMEM_PUT_PAGE 4 #define TMEM_GET_PAGE 5 #define TMEM_FLUSH_PAGE 6 #define TMEM_FLUSH_OBJECT 7 #if __XEN_INTERFACE_VERSION__ < 0x00040400 #define TMEM_NEW_PAGE 3 #define TMEM_READ 8 #define TMEM_WRITE 9 #define TMEM_XCHG 10 #endif /* Privileged commands to HYPERVISOR_tmem_op() */ #define TMEM_AUTH 101 #define TMEM_RESTORE_NEW 102 /* Subops for HYPERVISOR_tmem_op(TMEM_CONTROL) */ #define TMEMC_THAW 0 #define TMEMC_FREEZE 1 #define TMEMC_FLUSH 2 #define TMEMC_DESTROY 3 #define TMEMC_LIST 4 #define TMEMC_SET_WEIGHT 5 #define TMEMC_SET_CAP 6 #define TMEMC_SET_COMPRESS 7 #define TMEMC_QUERY_FREEABLE_MB 8 #define TMEMC_SAVE_BEGIN 10 #define TMEMC_SAVE_GET_VERSION 11 #define TMEMC_SAVE_GET_MAXPOOLS 12 #define TMEMC_SAVE_GET_CLIENT_WEIGHT 13 #define TMEMC_SAVE_GET_CLIENT_CAP 14 #define TMEMC_SAVE_GET_CLIENT_FLAGS 15 #define TMEMC_SAVE_GET_POOL_FLAGS 16 #define TMEMC_SAVE_GET_POOL_NPAGES 17 #define TMEMC_SAVE_GET_POOL_UUID 18 #define TMEMC_SAVE_GET_NEXT_PAGE 19 #define TMEMC_SAVE_GET_NEXT_INV 20 #define TMEMC_SAVE_END 21 #define TMEMC_RESTORE_BEGIN 30 #define TMEMC_RESTORE_PUT_PAGE 32 #define TMEMC_RESTORE_FLUSH_PAGE 33 /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ #define TMEM_POOL_PERSIST 1 #define TMEM_POOL_SHARED 2 #define TMEM_POOL_PRECOMPRESSED 4 #define TMEM_POOL_PAGESIZE_SHIFT 4 #define TMEM_POOL_PAGESIZE_MASK 0xf #define TMEM_POOL_VERSION_SHIFT 24 #define TMEM_POOL_VERSION_MASK 0xff #define TMEM_POOL_RESERVED_BITS 0x00ffff00 /* Bits for client flags (save/restore) */ #define TMEM_CLIENT_COMPRESS 1 #define TMEM_CLIENT_FROZEN 2 /* Special errno values */ #define EFROZEN 1000 #define EEMPTY 1001 #ifndef __ASSEMBLY__ #if __XEN_INTERFACE_VERSION__ < 0x00040400 typedef xen_pfn_t tmem_cli_mfn_t; #endif typedef XEN_GUEST_HANDLE(char) tmem_cli_va_t; struct tmem_op { uint32_t cmd; int32_t pool_id; union { struct { uint64_t uuid[2]; uint32_t flags; uint32_t arg1; } creat; /* for cmd == TMEM_NEW_POOL, TMEM_AUTH, TMEM_RESTORE_NEW */ struct { uint32_t subop; uint32_t cli_id; uint32_t arg1; uint32_t arg2; uint64_t oid[3]; tmem_cli_va_t buf; } ctrl; /* for cmd == TMEM_CONTROL */ struct { uint64_t oid[3]; uint32_t index; uint32_t tmem_offset; uint32_t pfn_offset; uint32_t len; xen_pfn_t 
cmfn; /* client machine page frame */ } gen; /* for all other cmd ("generic") */ } u; }; typedef struct tmem_op tmem_op_t; DEFINE_XEN_GUEST_HANDLE(tmem_op_t); struct tmem_handle { uint32_t pool_id; uint32_t index; uint64_t oid[3]; }; #endif #endif /* __XEN_PUBLIC_TMEM_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/trace.h000066400000000000000000000327201314037446600316310ustar00rootroot00000000000000/****************************************************************************** * include/public/trace.h * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Mark Williamson, (C) 2004 Intel Research Cambridge * Copyright (C) 2005 Bin Ren */ #ifndef __XEN_PUBLIC_TRACE_H__ #define __XEN_PUBLIC_TRACE_H__ #define TRACE_EXTRA_MAX 7 #define TRACE_EXTRA_SHIFT 28 /* Trace classes */ #define TRC_CLS_SHIFT 16 #define TRC_GEN 0x0001f000 /* General trace */ #define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */ #define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */ #define TRC_HVM 0x0008f000 /* Xen HVM trace */ #define TRC_MEM 0x0010f000 /* Xen memory trace */ #define TRC_PV 0x0020f000 /* Xen PV traces */ #define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */ #define TRC_HW 0x0080f000 /* Xen hardware-related traces */ #define TRC_GUEST 0x0800f000 /* Guest-generated traces */ #define TRC_ALL 0x0ffff000 #define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff) #define TRC_HD_CYCLE_FLAG (1UL<<31) #define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) ) #define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX) /* Trace subclasses */ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ #define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ #define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */ #define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */ /* * The highest 3 bits of the last 12 bits of TRC_SCHED_CLASS above are * reserved for encoding what scheduler produced the information. The * actual event is encoded in the last 9 bits. * * This means we have 8 scheduling IDs available (which means at most 8 * schedulers generating events) and, in each scheduler, up to 512 * different events. 
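 *
 * Worked out, that is: TRC_SCHED_ID_BITS == 3 reserves the top three of the
 * twelve subclass/event bits, giving 2^3 = 8 possible scheduler IDs, while
 * the remaining 12 - 3 = 9 bits allow 2^9 = 512 events per scheduler.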
*/ #define TRC_SCHED_ID_BITS 3 #define TRC_SCHED_ID_SHIFT (TRC_SUBCLS_SHIFT - TRC_SCHED_ID_BITS) #define TRC_SCHED_ID_MASK (((1UL<cpu_offset[cpu]). */ struct t_info { uint16_t tbuf_size; /* Size in pages of each trace buffer */ uint16_t mfn_offset[]; /* Offset within t_info structure of the page list per cpu */ /* MFN lists immediately after the header */ }; #endif /* __XEN_PUBLIC_TRACE_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/vcpu.h000066400000000000000000000221111314037446600315010ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ #include "xen.h" /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. 
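 *
 * A minimal usage sketch (assuming the conventional HYPERVISOR_vcpu_op()
 * wrapper for the vcpu_op hypercall described at the top of this file):
 *
 *     int up = HYPERVISOR_vcpu_op(VCPUOP_is_up, vcpuid, NULL);
 *
 * No extra argument is needed, so @extra_args is NULL; a return value of 1
 * means the VCPU is up.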
*/ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. */ uint64_t time[4]; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info); typedef struct vcpu_runstate_info vcpu_runstate_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical, depending on the * platform. The virtual address should be registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { XEN_GUEST_HANDLE(vcpu_runstate_info_t) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t); /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer); typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer); typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t; DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). 
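 *
 * A minimal sketch of arming a single-shot timer with this flag (assuming
 * the conventional HYPERVISOR_vcpu_op() wrapper and that deadline_ns is an
 * absolute system time in nanoseconds):
 *
 *     struct vcpu_set_singleshot_timer one_shot = {
 *         .timeout_abs_ns = deadline_ns,
 *         .flags          = VCPU_SSHOTTMR_future,
 *     };
 *     rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, vcpuid, &one_shot);
 *
 * With the flag set, rc == -ETIME indicates the deadline had already passed.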
*/ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. * * This may be called only once per vcpu. */ #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info); typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t); /* Send an NMI to the specified VCPU. @extra_arg == NULL. */ #define VCPUOP_send_nmi 11 /* * Get the physical ID information for a pinned vcpu's underlying physical * processor. The physical ID informmation is architecture-specific. * On x86: id[31:0]=apic_id, id[63:32]=acpi_id. * This command returns -EINVAL if it is not a valid operation for this VCPU. */ #define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */ struct vcpu_get_physid { uint64_t phys_id; }; typedef struct vcpu_get_physid vcpu_get_physid_t; DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t); #define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid)) #define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32)) /* * Register a memory location to get a secondary copy of the vcpu time * parameters. The master copy still exists as part of the vcpu shared * memory area, and this secondary copy is updated whenever the master copy * is updated (and using the same versioning scheme for synchronisation). * * The intent is that this copy may be mapped (RO) into userspace so * that usermode can compute system time using the time info and the * tsc. Usermode will see an array of vcpu_time_info structures, one * for each vcpu, and choose the right one by an existing mechanism * which allows it to get the current vcpu number (such as via a * segment limit). It can then apply the normal algorithm to compute * system time from the tsc. * * @extra_arg == pointer to vcpu_register_time_info_memory_area structure. */ #define VCPUOP_register_vcpu_time_memory_area 13 DEFINE_XEN_GUEST_HANDLE(vcpu_time_info_t); struct vcpu_register_time_memory_area { union { XEN_GUEST_HANDLE(vcpu_time_info_t) h; struct vcpu_time_info *v; uint64_t p; } addr; }; typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t; DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t); #endif /* __XEN_PUBLIC_VCPU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/version.h000066400000000000000000000061761314037446600322260ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ #include "xen.h" /* NB. All ops return zero on success, except XENVER_{version,pagesize} */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 typedef char xen_extraversion_t[16]; struct xen_extraversion { xen_extraversion_t extraversion; }; #define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; typedef struct xen_compile_info xen_compile_info_t; #define XENVER_capabilities 3 typedef char xen_capabilities_info_t[1024]; struct xen_capabilities_info { xen_capabilities_info_t info; }; #define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t)) #define XENVER_changeset 4 typedef char xen_changeset_info_t[64]; struct xen_changeset_info { xen_changeset_info_t info; }; #define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { xen_ulong_t virt_start; }; typedef struct xen_platform_parameters xen_platform_parameters_t; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; typedef struct xen_feature_info xen_feature_info_t; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 /* arg == xen_domain_handle_t. */ #define XENVER_guest_handle 8 #define XENVER_commandline 9 typedef char xen_commandline_t[1024]; #endif /* __XEN_PUBLIC_VERSION_H__ */ xen-compat.h000066400000000000000000000036361314037446600325330ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/****************************************************************************** * xen-compat.h * * Guest OS interface to Xen. Compatibility layer. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Christian Limpach */ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ #define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__ #elif !defined(__XEN_INTERFACE_VERSION__) /* Guests which do not specify a version get the legacy interface. */ #define __XEN_INTERFACE_VERSION__ 0x00000000 #endif #if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__ #error "These header files do not support the requested interface version." #endif #endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xen-mca.h000066400000000000000000000251411314037446600320620ustar00rootroot00000000000000/****************************************************************************** * arch-x86/mca.h * Guest OS machine check interface to x86 Xen. * * Contributed by Advanced Micro Devices, Inc. * Author: Christoph Egger * * Updated by Intel Corporation * Author: Liu, Jinsong * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__ #define __XEN_PUBLIC_ARCH_X86_MCA_H__ /* Hypercall */ #define __HYPERVISOR_mca __HYPERVISOR_arch_0 #define XEN_MCA_INTERFACE_VERSION 0x01ecc003 /* IN: Dom0 calls hypercall to retrieve nonurgent error log entry */ #define XEN_MC_NONURGENT 0x1 /* IN: Dom0 calls hypercall to retrieve urgent error log entry */ #define XEN_MC_URGENT 0x2 /* IN: Dom0 acknowledges previosly-fetched error log entry */ #define XEN_MC_ACK 0x4 /* OUT: All is ok */ #define XEN_MC_OK 0x0 /* OUT: Domain could not fetch data. */ #define XEN_MC_FETCHFAILED 0x1 /* OUT: There was no machine check data to fetch. */ #define XEN_MC_NODATA 0x2 #ifndef __ASSEMBLY__ /* vIRQ injected to Dom0 */ #define VIRQ_MCA VIRQ_ARCH_0 /* * mc_info entry types * mca machine check info are recorded in mc_info entries. * when fetch mca info, it can use MC_TYPE_... to distinguish * different mca info. */ #define MC_TYPE_GLOBAL 0 #define MC_TYPE_BANK 1 #define MC_TYPE_EXTENDED 2 #define MC_TYPE_RECOVERY 3 struct mcinfo_common { uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) #define MC_FLAG_RECOVERABLE (1 << 2) #define MC_FLAG_POLLED (1 << 3) #define MC_FLAG_RESET (1 << 4) #define MC_FLAG_CMCI (1 << 5) #define MC_FLAG_MCE (1 << 6) /* contains x86 global mc information */ struct mcinfo_global { struct mcinfo_common common; uint16_t mc_domid; /* running domain at the time in error */ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */ uint32_t mc_socketid; /* physical socket of the physical core */ uint16_t mc_coreid; /* physical impacted core */ uint16_t mc_core_threadid; /* core thread of physical core */ uint32_t mc_apicid; uint32_t mc_flags; uint64_t mc_gstatus; /* global status */ }; /* contains x86 bank mc information */ struct mcinfo_bank { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* domain referenced by mc_addr if valid */ uint64_t mc_status; /* bank status */ uint64_t mc_addr; /* bank address */ uint64_t mc_misc; uint64_t mc_ctrl2; uint64_t mc_tsc; }; struct mcinfo_msr { uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; /* contains mc information from other or additional mc MSRs */ struct mcinfo_extended { struct mcinfo_common common; uint32_t mc_msrs; /* Number of msr with valid values. */ /* * Currently Intel extended MSR (32/64) include all gp registers * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 16/32 to leave room. */ struct mcinfo_msr mc_msr[sizeof(void *) * 4]; }; /* Recovery Action flags. Giving recovery result information to DOM0 */ /* Xen takes successful recovery action, the error is recovered */ #define REC_ACTION_RECOVERED (0x1 << 0) /* No action is performed by XEN */ #define REC_ACTION_NONE (0x1 << 1) /* It's possible DOM0 might take action ownership in some case */ #define REC_ACTION_NEED_RESET (0x1 << 2) /* * Different Recovery Action types, if the action is performed successfully, * REC_ACTION_RECOVERED flag will be returned. */ /* Page Offline Action */ #define MC_ACTION_PAGE_OFFLINE (0x1 << 0) /* CPU offline Action */ #define MC_ACTION_CPU_OFFLINE (0x1 << 1) /* L3 cache disable Action */ #define MC_ACTION_CACHE_SHRINK (0x1 << 2) /* * Below interface used between XEN/DOM0 for passing XEN's recovery action * information to DOM0. 
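 *
 * For example, a page that Xen has successfully offlined would typically be
 * reported to DOM0 in a struct mcinfo_recovery (defined below) whose
 * action_flags contain REC_ACTION_RECOVERED, whose action_types contain
 * MC_ACTION_PAGE_OFFLINE, and whose action_info.page_retire.mfn identifies
 * the affected page.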
*/ struct page_offline_action { /* Params for passing the offlined page number to DOM0 */ uint64_t mfn; uint64_t status; }; struct cpu_offline_action { /* Params for passing the identity of the offlined CPU to DOM0 */ uint32_t mc_socketid; uint16_t mc_coreid; uint16_t mc_core_threadid; }; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { struct mcinfo_common common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { struct page_offline_action page_retire; struct cpu_offline_action cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; #define MCINFO_MAXSIZE 768 struct mc_info { /* Number of mcinfo_* entries in mi_data */ uint32_t mi_nentries; uint32_t flags; uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8]; }; DEFINE_GUEST_HANDLE_STRUCT(mc_info); #define __MC_MSR_ARRAYSIZE 8 #define __MC_MSR_MCGCAP 0 #define __MC_NMSRS 1 #define MC_NCAPS 7 struct mcinfo_logical_cpu { uint32_t mc_cpunr; uint32_t mc_chipid; uint16_t mc_coreid; uint16_t mc_threadid; uint32_t mc_apicid; uint32_t mc_clusterid; uint32_t mc_ncores; uint32_t mc_ncores_active; uint32_t mc_nthreads; uint32_t mc_cpuid_level; uint32_t mc_family; uint32_t mc_vendor; uint32_t mc_model; uint32_t mc_step; char mc_vendorid[16]; char mc_brandid[64]; uint32_t mc_cpu_caps[MC_NCAPS]; uint32_t mc_cache_size; uint32_t mc_cache_alignment; uint32_t mc_nmsrvals; struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; DEFINE_GUEST_HANDLE_STRUCT(mcinfo_logical_cpu); /* * Prototype: * uint32_t x86_mcinfo_nentries(struct mc_info *mi); */ #define x86_mcinfo_nentries(_mi) \ ((_mi)->mi_nentries) /* * Prototype: * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi); */ #define x86_mcinfo_first(_mi) \ ((struct mcinfo_common *)(_mi)->mi_data) /* * Prototype: * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic); */ #define x86_mcinfo_next(_mic) \ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)) /* * Prototype: * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type); */ static inline void x86_mcinfo_lookup(struct mcinfo_common **ret, struct mc_info *mi, uint16_t type) { uint32_t i; struct mcinfo_common *mic; bool found = 0; if (!ret || !mi) return; mic = x86_mcinfo_first(mi); for (i = 0; i < x86_mcinfo_nentries(mi); i++) { if (mic->type == type) { found = 1; break; } mic = x86_mcinfo_next(mic); } *ret = found ? mic : NULL; } /* * Fetch machine check data from hypervisor. */ #define XEN_MC_fetch 1 struct xen_mc_fetch { /* * IN: XEN_MC_NONURGENT, XEN_MC_URGENT, * XEN_MC_ACK if ack'king an earlier fetch * OUT: XEN_MC_OK, XEN_MC_FETCHAILED, XEN_MC_NODATA */ uint32_t flags; uint32_t _pad0; /* OUT: id for ack, IN: id we are ack'ing */ uint64_t fetch_id; /* OUT variables. */ XEN_GUEST_HANDLE(mc_info) data; }; DEFINE_GUEST_HANDLE_STRUCT(xen_mc_fetch); /* * This tells the hypervisor to notify a DomU about the machine check error */ #define XEN_MC_notifydomain 2 struct xen_mc_notifydomain { /* IN variables */ uint16_t mc_domid; /* The unprivileged domain to notify */ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify */ /* IN/OUT variables */ uint32_t flags; }; DEFINE_GUEST_HANDLE_STRUCT(xen_mc_notifydomain); #define XEN_MC_physcpuinfo 3 struct xen_mc_physcpuinfo { /* IN/OUT */ uint32_t ncpus; uint32_t _pad0; /* OUT */ XEN_GUEST_HANDLE(mcinfo_logical_cpu) info; }; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 struct xen_mc_msrinject { /* IN */ uint32_t mcinj_cpunr; /* target processor id */ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */ uint32_t mcinj_count; /* 0 .. 
count-1 in array are valid */ uint32_t _pad0; struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; }; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 #define XEN_MC_mceinject 5 struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { struct xen_mc_fetch mc_fetch; struct xen_mc_notifydomain mc_notifydomain; struct xen_mc_physcpuinfo mc_physcpuinfo; struct xen_mc_msrinject mc_msrinject; struct xen_mc_mceinject mc_mceinject; } u; }; DEFINE_GUEST_HANDLE_STRUCT(xen_mc); /* Fields are zero when not available */ struct xen_mce { __u64 status; __u64 misc; __u64 addr; __u64 mcgstatus; __u64 ip; __u64 tsc; /* cpu time stamp counter */ __u64 time; /* wall time_t when error was detected */ __u8 cpuvendor; /* cpu vendor as encoded in system.h */ __u8 inject_flags; /* software inject flags */ __u16 pad; __u32 cpuid; /* CPUID 1 EAX */ __u8 cs; /* code segment */ __u8 bank; /* machine check bank */ __u8 cpu; /* cpu number; obsolete; use extcpu now */ __u8 finished; /* entry is valid */ __u32 extcpu; /* linux cpu number that detected the error */ __u32 socketid; /* CPU socket ID */ __u32 apicid; /* CPU initial apic ID */ __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ }; /* * This structure contains all data related to the MCE log. Also * carries a signature to make it easier to find from external * debugging tools. Each entry is only valid when its finished flag * is set. */ #define XEN_MCE_LOG_LEN 32 struct xen_mce_log { char signature[12]; /* "MACHINECHECK" */ unsigned len; /* = XEN_MCE_LOG_LEN */ unsigned next; unsigned flags; unsigned recordlen; /* length of struct xen_mce */ struct xen_mce entry[XEN_MCE_LOG_LEN]; }; #define XEN_MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ #define XEN_MCE_LOG_SIGNATURE "MACHINECHECK" #define MCE_GET_RECORD_LEN _IOR('M', 1, int) #define MCE_GET_LOG_LEN _IOR('M', 2, int) #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) #endif /* __ASSEMBLY__ */ #endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xen.h000066400000000000000000001067231314037446600313320ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include "xen-compat.h" #if defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #elif defined(__i386__) || defined(__x86_64__) #include "arch-x86/xen.h" #elif defined(__ia64__) #include "arch-ia64.h" #elif defined(__arm__) || defined (__aarch64__) #include "arch-arm.h" #else #error "Unsupported architecture" #endif #ifndef __ASSEMBLY__ /* Guest handles for primitive C types. */ DEFINE_XEN_GUEST_HANDLE(char); __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char); DEFINE_XEN_GUEST_HANDLE(int); __DEFINE_XEN_GUEST_HANDLE(uint, unsigned int); #if __XEN_INTERFACE_VERSION__ < 0x00040300 DEFINE_XEN_GUEST_HANDLE(long); __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long); #endif DEFINE_XEN_GUEST_HANDLE(void); DEFINE_XEN_GUEST_HANDLE(uint64_t); DEFINE_XEN_GUEST_HANDLE(xen_pfn_t); DEFINE_XEN_GUEST_HANDLE(xen_ulong_t); #endif /* * HYPERCALLS */ /* `incontents 100 hcalls List of hypercalls * ` enum hypercall_num { // __HYPERVISOR_* => HYPERVISOR_*() */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */ #define __HYPERVISOR_platform_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */ #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */ #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_xsm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op_new 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 #define __HYPERVISOR_sysctl 35 #define __HYPERVISOR_domctl 36 #define __HYPERVISOR_kexec_op 37 #define __HYPERVISOR_tmem_op 38 #define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ /* Architecture-specific hypercall definitions. */ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* ` } */ /* * HYPERCALL COMPATIBILITY. */ /* New sched_op hypercall introduced in 0x00030101. */ #if __XEN_INTERFACE_VERSION__ < 0x00030101 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat #else #define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_new #endif /* New event-channel and physdev hypercalls introduced in 0x00030202. 
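 *
 * For instance, a guest built with __XEN_INTERFACE_VERSION__ set below
 * 0x00030202 has __HYPERVISOR_event_channel_op and __HYPERVISOR_physdev_op
 * remapped to their _compat variants by the block below.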
*/ #if __XEN_INTERFACE_VERSION__ < 0x00030202 #undef __HYPERVISOR_event_channel_op #define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat #undef __HYPERVISOR_physdev_op #define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat #endif /* New platform_op hypercall introduced in 0x00030204. */ #if __XEN_INTERFACE_VERSION__ < 0x00030204 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) #define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op #endif /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. * * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. * The latter can be allocated only once per guest: they must initially be * allocated to VCPU0 but can subsequently be re-bound. */ /* ` enum virq { */ #define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ #define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */ #define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */ #define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */ #define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */ #define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occured */ #define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */ #define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 /* ` } */ #define NR_VIRQS 24 /* * ` enum neg_errnoval * ` HYPERVISOR_mmu_update(const struct mmu_update reqs[], * ` unsigned count, unsigned *done_out, * ` unsigned foreigndom) * ` * @reqs is an array of mmu_update_t structures ((ptr, val) pairs). * @count is the length of the above array. * @pdone is an output parameter indicating number of completed operations * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this * hypercall invocation. Can be DOMID_SELF. * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced * in this hypercall invocation. The value of this field * (x) encodes the PFD as follows: * x == 0 => PFD == DOMID_SELF * x != 0 => PFD == x - 1 * * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command. * ------------- * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table belonging to PFD. If updating an L1 table, * and the new table entry is valid/present, the mapped frame must belong to * FD. If attempting to map an I/O page then the caller assumes the privilege * of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * There also certain implicit requirements when using this hypercall. The * pages that make up a pagetable must be mapped read-only in the guest. * This prevents uncontrolled guest updates to the pagetable. 
Xen strictly * enforces this, and will disallow any pagetable update which will end up * mapping pagetable page RW, and will disallow using any writable page as a * pagetable. In practice it means that when constructing a page table for a * process, thread, etc, we MUST be very dilligient in following these rules: * 1). Start with top-level page (PGD or in Xen language: L4). Fill out * the entries. * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD * or L2). * 3). Start filling out the PTE table (L1) with the PTE entries. Once * done, make sure to set each of those entries to RO (so writeable bit * is unset). Once that has been completed, set the PMD (L2) for this * PTE table as RO. * 4). When completed with all of the PMD (L2) entries, and all of them have * been set to RO, make sure to set RO the PUD (L3). Do the same * operation on PGD (L4) pagetable entries that have a PUD (L3) entry. * 5). Now before you can use those pages (so setting the cr3), you MUST also * pin them so that the hypervisor can verify the entries. This is done * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame * number of the PGD (L4)). And this point the HYPERVISOR_mmuext_op( * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be * issued. * For 32-bit guests, the L4 is not used (as there is less pagetables), so * instead use L3. * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE * hypercall. Also if so desired the OS can also try to write to the PTE * and be trapped by the hypervisor (as the PTE entry is RO). * * To deallocate the pages, the operations are the reverse of the steps * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the * pagetable MUST not be in use (meaning that the cr3 is not set to it). * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. * * @val is usually the machine frame number along with some attributes. * The attributes by default follow the architecture defined bits. Meaning that * if this is a X86_64 machine and four page table layout is used, the layout * of val is: * - 63 if set means No execute (NX) * - 46-13 the machine frame number * - 12 available for guest * - 11 available for guest * - 10 available for guest * - 9 available for guest * - 8 global * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages) * - 6 dirty * - 5 accessed * - 4 page cached disabled * - 3 page write through * - 2 userspace accessible * - 1 writeable * - 0 present * * The one bits that does not fit with the default layout is the PAGE_PSE * also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the * HYPERVISOR_mmuext_op serve as mechanism to set a pagetable to be 4MB * (or 2MB) instead of using the PAGE_PSE bit. * * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen * using it as the Page Attribute Table (PAT) bit - for details on it please * refer to Intel SDM 10.12. The PAT allows to set the caching attributes of * pages instead of using MTRRs. 
* * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits): * PAT4 PAT0 * +-----+-----+----+----+----+-----+----+----+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux * +-----+-----+----+----+----+-----+----+----+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots) * +-----+-----+----+----+----+-----+----+----+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen * +-----+-----+----+----+----+-----+----+----+ * * The lookup of this index table translates to looking up * Bit 7, Bit 4, and Bit 3 of val entry: * * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3). * * If all bits are off, then we are using PAT0. If bit 3 turned on, * then we are using PAT1, if bit 3 and bit 4, then PAT2.. * * As you can see, the Linux PAT1 translates to PAT4 under Xen. Which means * that if a guest that follows Linux's PAT setup and would like to set Write * Combined on pages it MUST use PAT4 entry. Meaning that Bit 7 (PAGE_PAT) is * set. For example, under Linux it only uses PAT0, PAT1, and PAT2 for the * caching as: * * WB = none (so PAT0) * WC = PWT (bit 3 on) * UC = PWT | PCD (bit 3 and 4 are on). * * To make it work with Xen, it needs to translate the WC bit as so: * * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3 * * And to translate back it would: * * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * ` enum neg_errnoval * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[], * ` unsigned int count, * ` unsigned int *pdone, * ` unsigned int foreigndom) */ /* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. * * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_FLUSH_CACHE_GLOBAL * No additional arguments. Writes back and flushes cache contents * on all CPUs in the system. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. * * cmd: MMUEXT_CLEAR_PAGE * mfn: Machine frame number to be cleared. * * cmd: MMUEXT_COPY_PAGE * mfn: Machine frame number of the destination page. * src_mfn: Machine frame number of the source page. 
* * cmd: MMUEXT_[UN]MARK_SUPER * mfn: Machine frame number of head of superpage to be [un]marked. */ /* ` enum mmuext_cmd { */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #define MMUEXT_CLEAR_PAGE 16 #define MMUEXT_COPY_PAGE 17 #define MMUEXT_FLUSH_CACHE_GLOBAL 18 #define MMUEXT_MARK_SUPER 19 #define MMUEXT_UNMARK_SUPER 20 /* ` } */ #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; /* => enum mmuext_cmd */ union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */ xen_pfn_t mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ #if __XEN_INTERFACE_VERSION__ >= 0x00030205 XEN_GUEST_HANDLE(const_void) vcpumask; #else const void *vcpumask; #endif /* COPY_PAGE */ xen_pfn_t src_mfn; } arg2; }; DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); typedef struct mmuext_op mmuext_op_t; DEFINE_XEN_GUEST_HANDLE(mmuext_op_t); #endif /* * ` enum neg_errnoval * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val, * ` enum uvm_flags flags) * ` * ` enum neg_errnoval * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val, * ` enum uvm_flags flags, * ` domid_t domid) * ` * ` @va: The virtual address whose mapping we want to change * ` @val: The new page table entry, must contain a machine address * ` @flags: Control TLB flushes */ /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ /* ` enum uvm_flags { */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* ` } */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 /* x86/32 guests: simulate full 4GB segment limits. */ #define VMASST_TYPE_4gb_segments 0 /* x86/32 guests: trap (vector 15) whenever above vmassist is used. */ #define VMASST_TYPE_4gb_segments_notify 1 /* * x86 guests: support writes to bottom-level PTEs. * NB1. Page-directory entries cannot be written. * NB2. Guest must continue to remove all writable mappings of PTEs. */ #define VMASST_TYPE_writable_pagetables 2 /* x86/PAE guests: support PDPTs above 4GB. */ #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. 
* Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * DOMID_COW is used as the owner of sharable pages */ #define DOMID_COW (0x7FF3U) /* DOMID_INVALID is used to identify pages with unknown owner. */ #define DOMID_INVALID (0x7FF4U) /* Idle domain. */ #define DOMID_IDLE (0x7FFFU) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; DEFINE_GUEST_HANDLE_STRUCT(mmu_update); typedef struct mmu_update mmu_update_t; DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * ` enum neg_errnoval * ` HYPERVISOR_multicall(multicall_entry_t call_list[], * ` unsigned int nr_calls); * * NB. The fields are natural register size for this architecture. */ struct multicall_entry { unsigned long op; #if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) unsigned long result; #else long result; #endif unsigned long args[6]; }; DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); #if __XEN_INTERFACE_VERSION__ < 0x00040400 /* * Event channel endpoints per domain (when using the 2-level ABI): * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS #endif struct vcpu_time_info { /* * Updates to the following values are preceded and followed * by an increment of 'version'. The guest can therefore * detect updates by looking for changes to 'version'. If the * least-significant bit of the version number is set then an * update is in progress and the guest must wait to read a * consistent set of values. The correct way to interact with * the version number is similar to Linux's seqlock: see the * implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ typedef struct vcpu_time_info vcpu_time_info_t; struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). 
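 *
 * (Illustrative sketch, not part of the original text: when
 * XEN_HAVE_PV_UPCALL_MASK is available, a guest can use the mask much
 * like an interrupt-disable flag around a critical section, with 'v'
 * pointing at its own vcpu_info entry:
 *
 *     v->evtchn_upcall_mask = 1;
 *     barrier();
 *     ... critical section ...
 *     barrier();
 *     v->evtchn_upcall_mask = 0;
 *     barrier();
 *     if ( v->evtchn_upcall_pending )
 *         force_evtchn_callback();
 *
 * 'force_evtchn_callback' is a placeholder for whatever mechanism the
 * guest uses to get pending events delivered once the mask is cleared.)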
* This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; #ifdef XEN_HAVE_PV_UPCALL_MASK uint8_t evtchn_upcall_mask; #else /* XEN_HAVE_PV_UPCALL_MASK */ uint8_t pad0; #endif /* XEN_HAVE_PV_UPCALL_MASK */ xen_ulong_t evtchn_pending_sel; struct arch_vcpu_info arch; #ifdef CONFIG_PARAVIRT_XEN struct pvclock_vcpu_time_info time; #else struct vcpu_time_info time; #endif }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; #endif /* * `incontents 200 startofday_shared Start-of-day shared data structure * Xen/kernel shared data -- pointer provided in start_info. * * This structure is defined to be both smaller than a page, and the * only data on the shared page, but may vary in actual size even within * compatible Xen versions; guests should not rely on the size * of this structure remaining constant. */ struct shared_info { struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. 
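 *
 * (Illustrative sketch, not part of the original text: an upcall handler
 * for the 2-level ABI typically scans this hierarchy roughly as follows,
 * with 's' the shared_info page and 'v' the current vcpu_info:
 *
 *     v->evtchn_upcall_pending = 0;
 *     xen_ulong_t sel = xchg(&v->evtchn_pending_sel, 0);
 *     while ( sel != 0 )
 *     {
 *         unsigned int i = __builtin_ctzl(sel);
 *         sel &= sel - 1;
 *         xen_ulong_t ready = s->evtchn_pending[i] & ~s->evtchn_mask[i];
 *         ... for each set bit b in 'ready', handle port
 *             i * BITS_PER_LONG + b, clearing its PENDING bit ...
 *     }
 *
 * 'xchg' and '__builtin_ctzl' stand in for whatever atomic-swap and
 * find-first-set primitives the guest OS provides.)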
 */
    xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
    xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];

    /*
     * Wallclock time: updated only by control software. Guests should base
     * their gettimeofday() syscall on this wallclock-base value.
     */
#ifdef CONFIG_PARAVIRT_XEN
    struct pvclock_wall_clock wc;
#else
    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
#endif

    struct arch_shared_info arch;
};
#ifndef __XEN__
typedef struct shared_info shared_info_t;
#endif

/*
 * `incontents 200 startofday Start-of-day memory layout
 *
 *  1. The domain is started within a contiguous virtual-memory region.
 *  2. The contiguous region ends on an aligned 4MB boundary.
 *  3. This is the order of bootstrap elements in the initial virtual region:
 *      a. relocated kernel image
 *      b. initial ram disk              [mod_start, mod_len]
 *      c. list of allocated page frames [mfn_list, nr_pages]
 *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
 *      d. start_info_t structure        [register ESI (x86)]
 *      e. bootstrap page tables         [pt_base and CR3 (x86)]
 *      f. bootstrap stack               [register ESP (x86)]
 *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
 *  5. The initial ram disk may be omitted.
 *  6. The list of page frames forms a contiguous 'pseudo-physical' memory
 *     layout for the domain. In particular, the bootstrap virtual-memory
 *     region is a 1:1 mapping to the first section of the pseudo-physical map.
 *  7. All bootstrap elements are mapped read-writable for the guest OS. The
 *     only exception is the bootstrap page table, which is mapped read-only.
 *  8. There is guaranteed to be at least 512kB padding after the final
 *     bootstrap element. If necessary, the bootstrap virtual region is
 *     extended by an extra 4MB to ensure this.
 *
 * Note: Prior to 25833:bb85bbccb1c9 ("x86/32-on-64 adjust Dom0 initial page
 * table layout") a bug caused the pt_base (3.e above) and cr3 to not point
 * to the start of the guest page tables (it was offset by two pages).
 * This only manifested itself on 32-on-64 dom0 kernels and not 32-on-64 domU
 * or 64-bit kernels of any colour. The page tables for a 32-on-64 dom0 got
 * allocated in the order: 'first L1', 'first L2', 'first L3', so the offset
 * to the page table base is by two pages back. The initial domain, if it is
 * 32-bit and runs under a 64-bit hypervisor, should _NOT_ use the two pages
 * preceding pt_base and should mark them as reserved/unused.
 */
#if defined(XEN_HAVE_PV_GUEST_ENTRY) || defined(CONFIG_PARAVIRT_XEN)
struct start_info {
    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
    char magic[32];             /* "xen-<version>-<platform>".            */
    unsigned long nr_pages;     /* Total pages allocated to this domain.  */
    unsigned long shared_info;  /* MACHINE address of shared info struct. */
    uint32_t flags;             /* SIF_xxx flags.                         */
    xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
    uint32_t store_evtchn;      /* Event channel for store communication. */
    union {
        struct {
            xen_pfn_t mfn;      /* MACHINE page number of console page.   */
            uint32_t  evtchn;   /* Event channel for console page.        */
        } domU;
        struct {
            uint32_t info_off;  /* Offset of console_info struct.         */
            uint32_t info_size; /* Size of console_info struct from start.*/
        } dom0;
    } console;
    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
    unsigned long mfn_list;     /* VIRTUAL address of page-frame list.
*/ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ /* (PFN of pre-loaded module if */ /* SIF_MOD_START_PFN set in flags). */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ #define MAX_GUEST_CMDLINE 1024 int8_t cmd_line[MAX_GUEST_CMDLINE]; /* The pfn range here covers both page table and p->m table frames. */ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */ }; typedef struct start_info start_info_t; /* New console union for dom0 introduced in 0x00030203. */ #if __XEN_INTERFACE_VERSION__ < 0x00030203 #define console_mfn console.domU.mfn #define console_evtchn console.domU.evtchn #endif #endif /* XEN_HAVE_PV_GUEST_ENTRY */ /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ #define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ #define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */ #define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ /* * A multiboot module is a package containing modules very similar to a * multiboot module array. The only differences are: * - the array of module descriptors is by convention simply at the beginning * of the multiboot module, * - addresses in the module descriptors are based on the beginning of the * multiboot module, * - the number of modules is determined by a termination descriptor that has * mod_start == 0. * * This permits to both build it statically and reference it in a configuration * file, and let the PV guest easily rebase the addresses to virtual addresses * and at the same time count the number of modules. */ struct xen_multiboot_mod_list { /* Address of first byte of the module */ uint32_t mod_start; /* Address of last byte of the module (inclusive) */ uint32_t mod_end; /* Address of zero-terminated command line */ uint32_t cmdline; /* Unused, must be zero */ uint32_t pad; }; /* * `incontents 200 startofday_dom0_console Dom0_console * * The console structure in start_info.console.dom0 * * This structure includes a variety of information required to * have a working VGA/VESA console. */ typedef struct dom0_vga_console_info { uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */ #define XEN_VGATYPE_TEXT_MODE_3 0x03 #define XEN_VGATYPE_VESA_LFB 0x23 #define XEN_VGATYPE_EFI_LFB 0x70 union { struct { /* Font height, in pixels. */ uint16_t font_height; /* Cursor location (column, row). */ uint16_t cursor_x, cursor_y; /* Number of rows and columns (dimensions in characters). */ uint16_t rows, columns; } text_mode_3; struct { /* Width and height, in pixels. */ uint16_t width, height; /* Bytes per scan line. */ uint16_t bytes_per_line; /* Bits per pixel. */ uint16_t bits_per_pixel; /* LFB physical address, and size (in units of 64kB). */ uint32_t lfb_base; uint32_t lfb_size; /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ uint8_t red_pos, red_size; uint8_t green_pos, green_size; uint8_t blue_pos, blue_size; uint8_t rsvd_pos, rsvd_size; #if __XEN_INTERFACE_VERSION__ >= 0x00030206 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)) /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ uint32_t gbl_caps; /* Mode attributes (offset 0x0, VESA command 0x4f01). 
*/ uint16_t mode_attrs; #endif } vesa_lfb; } u; } dom0_vga_console_info_t; #define xen_vga_console_info dom0_vga_console_info #define xen_vga_console_info_t dom0_vga_console_info_t typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) __DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t); __DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t); __DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t); __DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t); #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ /* Default definitions for macros used by domctl/sysctl. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) #ifndef uint64_aligned_t #define uint64_aligned_t uint64_t #endif #ifndef XEN_GUEST_HANDLE_64 #define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name) #endif #ifndef __ASSEMBLY__ struct xenctl_bitmap { XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_bits; }; #endif #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_XEN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600321740ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. 
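 *
 * (Illustrative sketch, not part of the original text: for a virtually
 * contiguous buffer spanning two pages, a caller would provide a
 * descriptor with two entries, filled in roughly as:
 *
 *     desc->magic      = XENCOMM_MAGIC;
 *     desc->nr_addrs   = 2;
 *     desc->address[0] = virt_to_phys(buf);
 *     desc->address[1] = virt_to_phys(buf + PAGE_SIZE);
 *
 * with any unused slots set to XENCOMM_INVALID.  'virt_to_phys' stands
 * for the guest's virtual-to-physical translation; 'buf' is the start
 * of the buffer being described.)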
*/ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xenoprof.h000066400000000000000000000106551314037446600323760ustar00rootroot00000000000000/****************************************************************************** * xenoprof.h * * Interface for enabling system wide profiling based on hardware performance * counters * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Hewlett-Packard Co. * Written by Aravind Menon & Jose Renato Santos */ #ifndef __XEN_PUBLIC_XENOPROF_H__ #define __XEN_PUBLIC_XENOPROF_H__ #include "xen.h" /* * Commands to HYPERVISOR_xenoprof_op(). 
*/ #define XENOPROF_init 0 #define XENOPROF_reset_active_list 1 #define XENOPROF_reset_passive_list 2 #define XENOPROF_set_active 3 #define XENOPROF_set_passive 4 #define XENOPROF_reserve_counters 5 #define XENOPROF_counter 6 #define XENOPROF_setup_events 7 #define XENOPROF_enable_virq 8 #define XENOPROF_start 9 #define XENOPROF_stop 10 #define XENOPROF_disable_virq 11 #define XENOPROF_release_counters 12 #define XENOPROF_shutdown 13 #define XENOPROF_get_buffer 14 #define XENOPROF_set_backtrace 15 /* AMD IBS support */ #define XENOPROF_get_ibs_caps 16 #define XENOPROF_ibs_counter 17 #define XENOPROF_last_op 17 #define MAX_OPROF_EVENTS 32 #define MAX_OPROF_DOMAINS 25 #define XENOPROF_CPU_TYPE_SIZE 64 /* Xenoprof performance events (not Xen events) */ struct event_log { uint64_t eip; uint8_t mode; uint8_t event; }; /* PC value that indicates a special code */ #define XENOPROF_ESCAPE_CODE (~0ULL) /* Transient events for the xenoprof->oprofile cpu buf */ #define XENOPROF_TRACE_BEGIN 1 /* Xenoprof buffer shared between Xen and domain - 1 per VCPU */ struct xenoprof_buf { uint32_t event_head; uint32_t event_tail; uint32_t event_size; uint32_t vcpu_id; uint64_t xen_samples; uint64_t kernel_samples; uint64_t user_samples; uint64_t lost_samples; struct event_log event_log[1]; }; #ifndef __XEN__ typedef struct xenoprof_buf xenoprof_buf_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t); #endif struct xenoprof_init { int32_t num_events; int32_t is_primary; char cpu_type[XENOPROF_CPU_TYPE_SIZE]; }; typedef struct xenoprof_init xenoprof_init_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t); struct xenoprof_get_buffer { int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; }; typedef struct xenoprof_get_buffer xenoprof_get_buffer_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t); struct xenoprof_counter { uint32_t ind; uint64_t count; uint32_t enabled; uint32_t event; uint32_t hypervisor; uint32_t kernel; uint32_t user; uint64_t unit_mask; }; typedef struct xenoprof_counter xenoprof_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t); typedef struct xenoprof_passive { uint16_t domain_id; int32_t max_samples; int32_t nbuf; int32_t bufsize; uint64_t buf_gmaddr; } xenoprof_passive_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t); struct xenoprof_ibs_counter { uint64_t op_enabled; uint64_t fetch_enabled; uint64_t max_cnt_fetch; uint64_t max_cnt_op; uint64_t rand_en; uint64_t dispatched_ops; }; typedef struct xenoprof_ibs_counter xenoprof_ibs_counter_t; DEFINE_XEN_GUEST_HANDLE(xenoprof_ibs_counter_t); #endif /* __XEN_PUBLIC_XENOPROF_H__ */ /* * Local variables: * mode: C * c-file-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xsm/000077500000000000000000000000001314037446600311655ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xsm/acm.h000066400000000000000000000156251314037446600321070ustar00rootroot00000000000000/* * acm.h: Xen access control module interface defintions * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following 
conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005, International Business Machines Corporation. */ #ifndef _XEN_PUBLIC_ACM_H #define _XEN_PUBLIC_ACM_H #include "../xen.h" /* default ssid reference value if not supplied */ #define ACM_DEFAULT_SSID 0x0 #define ACM_DEFAULT_LOCAL_SSID 0x0 /* Internal ACM ERROR types */ #define ACM_OK 0 #define ACM_UNDEF -1 #define ACM_INIT_SSID_ERROR -2 #define ACM_INIT_SOID_ERROR -3 #define ACM_ERROR -4 /* External ACCESS DECISIONS */ #define ACM_ACCESS_PERMITTED 0 #define ACM_ACCESS_DENIED -111 #define ACM_NULL_POINTER_ERROR -200 /* Error codes reported in when trying to test for a new policy These error codes are reported in an array of tuples where each error code is followed by a parameter describing the error more closely, such as a domain id. */ #define ACM_EVTCHN_SHARING_VIOLATION 0x100 #define ACM_GNTTAB_SHARING_VIOLATION 0x101 #define ACM_DOMAIN_LOOKUP 0x102 #define ACM_CHWALL_CONFLICT 0x103 #define ACM_SSIDREF_IN_USE 0x104 /* primary policy in lower 4 bits */ #define ACM_NULL_POLICY 0 #define ACM_CHINESE_WALL_POLICY 1 #define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2 #define ACM_POLICY_UNDEFINED 15 /* combinations have secondary policy component in higher 4bit */ #define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY) /* policy: */ #define ACM_POLICY_NAME(X) \ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \ "UNDEFINED" /* the following policy versions must be increased * whenever the interpretation of the related * policy's data structure changes */ #define ACM_POLICY_VERSION 4 #define ACM_CHWALL_VERSION 1 #define ACM_STE_VERSION 1 /* defines a ssid reference used by xen */ typedef uint32_t ssidref_t; /* hooks that are known to domains */ #define ACMHOOK_none 0 #define ACMHOOK_sharing 1 #define ACMHOOK_authorization 2 #define ACMHOOK_conflictset 3 /* -------security policy relevant type definitions-------- */ /* type identifier; compares to "equal" or "not equal" */ typedef uint16_t domaintype_t; /* CHINESE WALL POLICY DATA STRUCTURES * * current accumulated conflict type set: * When a domain is started and has a type that is in * a conflict set, the conflicting types are incremented in * the aggregate set. When a domain is destroyed, the * conflicting types to its type are decremented. * If a domain has multiple types, this procedure works over * all those types. * * conflict_aggregate_set[i] holds the number of * running domains that have a conflict with type i. 
* * running_types[i] holds the number of running domains * that include type i in their ssidref-referenced type set * * conflict_sets[i][j] is "0" if type j has no conflict * with type i and is "1" otherwise. */ /* high-16 = version, low-16 = check magic */ #define ACM_MAGIC 0x0001debc /* size of the SHA1 hash identifying the XML policy from which the binary policy was created */ #define ACM_SHA1_HASH_SIZE 20 /* each offset in bytes from start of the struct they * are part of */ /* V3 of the policy buffer aded a version structure */ struct acm_policy_version { uint32_t major; uint32_t minor; }; /* each buffer consists of all policy information for * the respective policy given in the policy code * * acm_policy_buffer, acm_chwall_policy_buffer, * and acm_ste_policy_buffer need to stay 32-bit aligned * because we create binary policies also with external * tools that assume packed representations (e.g. the java tool) */ struct acm_policy_buffer { uint32_t magic; uint32_t policy_version; /* ACM_POLICY_VERSION */ uint32_t len; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_buffer_offset; uint32_t secondary_policy_code; uint32_t secondary_buffer_offset; struct acm_policy_version xml_pol_version; /* add in V3 */ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */ }; struct acm_policy_reference_buffer { uint32_t len; }; struct acm_chwall_policy_buffer { uint32_t policy_version; /* ACM_CHWALL_VERSION */ uint32_t policy_code; uint32_t chwall_max_types; uint32_t chwall_max_ssidrefs; uint32_t chwall_max_conflictsets; uint32_t chwall_ssid_offset; uint32_t chwall_conflict_sets_offset; uint32_t chwall_running_types_offset; uint32_t chwall_conflict_aggregate_offset; }; struct acm_ste_policy_buffer { uint32_t policy_version; /* ACM_STE_VERSION */ uint32_t policy_code; uint32_t ste_max_types; uint32_t ste_max_ssidrefs; uint32_t ste_ssid_offset; }; struct acm_stats_buffer { uint32_t magic; uint32_t len; uint32_t primary_policy_code; uint32_t primary_stats_offset; uint32_t secondary_policy_code; uint32_t secondary_stats_offset; }; struct acm_ste_stats_buffer { uint32_t ec_eval_count; uint32_t gt_eval_count; uint32_t ec_denied_count; uint32_t gt_denied_count; uint32_t ec_cachehit_count; uint32_t gt_cachehit_count; }; struct acm_ssid_buffer { uint32_t len; ssidref_t ssidref; uint32_t policy_reference_offset; uint32_t primary_policy_code; uint32_t primary_max_types; uint32_t primary_types_offset; uint32_t secondary_policy_code; uint32_t secondary_max_types; uint32_t secondary_types_offset; }; #endif /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ acm_ops.h000066400000000000000000000104741314037446600327060ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xsm/* * acm_ops.h: Xen access control module hypervisor commands * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Reiner Sailer * Copyright (c) 2005,2006 International Business Machines Corporation. */ #ifndef __XEN_PUBLIC_ACM_OPS_H__ #define __XEN_PUBLIC_ACM_OPS_H__ #include "../xen.h" #include "acm.h" /* * Make sure you increment the interface version whenever you modify this file! * This makes sure that old versions of acm tools will stop working in a * well-defined way (rather than crashing the machine, for instance). */ #define ACM_INTERFACE_VERSION 0xAAAA000A /************************************************************************/ /* * Prototype for this hypercall is: * int acm_op(int cmd, void *args) * @cmd == ACMOP_??? (access control module operation). * @args == Operation-specific extra arguments (NULL if none). */ #define ACMOP_setpolicy 1 struct acm_setpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pushcache; uint32_t pushcache_size; }; #define ACMOP_getpolicy 2 struct acm_getpolicy { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_dumpstats 3 struct acm_dumpstats { /* IN */ XEN_GUEST_HANDLE_64(void) pullcache; uint32_t pullcache_size; }; #define ACMOP_getssid 4 #define ACM_GETBY_ssidref 1 #define ACM_GETBY_domainid 2 struct acm_getssid { /* IN */ uint32_t get_ssid_by; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id; XEN_GUEST_HANDLE_64(void) ssidbuf; uint32_t ssidbuf_size; }; #define ACMOP_getdecision 5 struct acm_getdecision { /* IN */ uint32_t get_decision_by1; /* ACM_GETBY_* */ uint32_t get_decision_by2; /* ACM_GETBY_* */ union { domaintype_t domainid; ssidref_t ssidref; } id1; union { domaintype_t domainid; ssidref_t ssidref; } id2; uint32_t hook; /* OUT */ uint32_t acm_decision; }; #define ACMOP_chgpolicy 6 struct acm_change_policy { /* IN */ XEN_GUEST_HANDLE_64(void) policy_pushcache; uint32_t policy_pushcache_size; XEN_GUEST_HANDLE_64(void) del_array; uint32_t delarray_size; XEN_GUEST_HANDLE_64(void) chg_array; uint32_t chgarray_size; /* OUT */ /* array with error code */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; #define ACMOP_relabeldoms 7 struct acm_relabel_doms { /* IN */ XEN_GUEST_HANDLE_64(void) relabel_map; uint32_t relabel_map_size; /* OUT */ XEN_GUEST_HANDLE_64(void) err_array; uint32_t errarray_size; }; /* future interface to Xen */ struct xen_acmctl { uint32_t cmd; uint32_t interface_version; union { struct acm_setpolicy setpolicy; struct acm_getpolicy getpolicy; struct acm_dumpstats dumpstats; struct acm_getssid getssid; struct acm_getdecision getdecision; struct acm_change_policy change_policy; struct acm_relabel_doms relabel_doms; } u; }; typedef struct xen_acmctl xen_acmctl_t; DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t); #endif /* __XEN_PUBLIC_ACM_OPS_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ flask_op.h000066400000000000000000000132721314037446600330620ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/interface/xsm/* * This file contains the flask_op hypercall commands and 
definitions. * * Author: George Coker, * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __FLASK_OP_H__ #define __FLASK_OP_H__ #define XEN_FLASK_INTERFACE_VERSION 1 struct xen_flask_load { XEN_GUEST_HANDLE(char) buffer; uint32_t size; }; struct xen_flask_setenforce { uint32_t enforcing; }; struct xen_flask_sid_context { /* IN/OUT: sid to convert to/from string */ uint32_t sid; /* IN: size of the context buffer * OUT: actual size of the output context string */ uint32_t size; XEN_GUEST_HANDLE(char) context; }; struct xen_flask_access { /* IN: access request */ uint32_t ssid; uint32_t tsid; uint32_t tclass; uint32_t req; /* OUT: AVC data */ uint32_t allowed; uint32_t audit_allow; uint32_t audit_deny; uint32_t seqno; }; struct xen_flask_transition { /* IN: transition SIDs and class */ uint32_t ssid; uint32_t tsid; uint32_t tclass; /* OUT: new SID */ uint32_t newsid; }; struct xen_flask_userlist { /* IN: starting SID for list */ uint32_t start_sid; /* IN: size of user string and output buffer * OUT: number of SIDs returned */ uint32_t size; union { /* IN: user to enumerate SIDs */ XEN_GUEST_HANDLE(char) user; /* OUT: SID list */ XEN_GUEST_HANDLE(uint32) sids; } u; }; struct xen_flask_boolean { /* IN/OUT: numeric identifier for boolean [GET/SET] * If -1, name will be used and bool_id will be filled in. 
*/ uint32_t bool_id; /* OUT: current enforcing value of boolean [GET/SET] */ uint8_t enforcing; /* OUT: pending value of boolean [GET/SET] */ uint8_t pending; /* IN: new value of boolean [SET] */ uint8_t new_value; /* IN: commit new value instead of only setting pending [SET] */ uint8_t commit; /* IN: size of boolean name buffer [GET/SET] * OUT: actual size of name [GET only] */ uint32_t size; /* IN: if bool_id is -1, used to find boolean [GET/SET] * OUT: textual name of boolean [GET only] */ XEN_GUEST_HANDLE(char) name; }; struct xen_flask_setavc_threshold { /* IN */ uint32_t threshold; }; struct xen_flask_hash_stats { /* OUT */ uint32_t entries; uint32_t buckets_used; uint32_t buckets_total; uint32_t max_chain_len; }; struct xen_flask_cache_stats { /* IN */ uint32_t cpu; /* OUT */ uint32_t lookups; uint32_t hits; uint32_t misses; uint32_t allocations; uint32_t reclaims; uint32_t frees; }; struct xen_flask_ocontext { /* IN */ uint32_t ocon; uint32_t sid; uint64_t low, high; }; struct xen_flask_peersid { /* IN */ evtchn_port_t evtchn; /* OUT */ uint32_t sid; }; struct xen_flask_relabel { /* IN */ uint32_t domid; uint32_t sid; }; struct xen_flask_op { uint32_t cmd; #define FLASK_LOAD 1 #define FLASK_GETENFORCE 2 #define FLASK_SETENFORCE 3 #define FLASK_CONTEXT_TO_SID 4 #define FLASK_SID_TO_CONTEXT 5 #define FLASK_ACCESS 6 #define FLASK_CREATE 7 #define FLASK_RELABEL 8 #define FLASK_USER 9 #define FLASK_POLICYVERS 10 #define FLASK_GETBOOL 11 #define FLASK_SETBOOL 12 #define FLASK_COMMITBOOLS 13 #define FLASK_MLS 14 #define FLASK_DISABLE 15 #define FLASK_GETAVC_THRESHOLD 16 #define FLASK_SETAVC_THRESHOLD 17 #define FLASK_AVC_HASHSTATS 18 #define FLASK_AVC_CACHESTATS 19 #define FLASK_MEMBER 20 #define FLASK_ADD_OCONTEXT 21 #define FLASK_DEL_OCONTEXT 22 #define FLASK_GET_PEER_SID 23 #define FLASK_RELABEL_DOMAIN 24 uint32_t interface_version; /* XEN_FLASK_INTERFACE_VERSION */ union { struct xen_flask_load load; struct xen_flask_setenforce enforce; /* FLASK_CONTEXT_TO_SID and FLASK_SID_TO_CONTEXT */ struct xen_flask_sid_context sid_context; struct xen_flask_access access; /* FLASK_CREATE, FLASK_RELABEL, FLASK_MEMBER */ struct xen_flask_transition transition; struct xen_flask_userlist userlist; /* FLASK_GETBOOL, FLASK_SETBOOL */ struct xen_flask_boolean boolean; struct xen_flask_setavc_threshold setavc_threshold; struct xen_flask_hash_stats hash_stats; struct xen_flask_cache_stats cache_stats; /* FLASK_ADD_OCONTEXT, FLASK_DEL_OCONTEXT */ struct xen_flask_ocontext ocontext; struct xen_flask_peersid peersid; struct xen_flask_relabel relabel; } u; }; typedef struct xen_flask_op xen_flask_op_t; DEFINE_XEN_GUEST_HANDLE(xen_flask_op_t); #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/net-util.h000066400000000000000000000031051314037446600303270ustar00rootroot00000000000000#ifndef __XEN_NETUTIL_H__ #define __XEN_NETUTIL_H__ #include #include #include #include #include static inline int skb_checksum_setup(struct sk_buff *skb, unsigned long *fixup_counter) { struct iphdr *iph = (void *)skb->data; __be16 *csum = NULL; int err = -EPROTO; skb_reset_network_header(skb); if (skb->ip_summed != CHECKSUM_PARTIAL) { /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (!skb_is_gso(skb)) return 0; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. 
*/ ++*fixup_counter; --csum; } if (skb->protocol != htons(ETH_P_IP)) goto out; switch (iph->protocol) { case IPPROTO_TCP: if (!skb_partial_csum_set(skb, 4 * iph->ihl, offsetof(struct tcphdr, check))) goto out; if (csum) csum = &tcp_hdr(skb)->check; break; case IPPROTO_UDP: if (!skb_partial_csum_set(skb, 4 * iph->ihl, offsetof(struct udphdr, check))) goto out; if (csum) csum = &udp_hdr(skb)->check; break; default: net_err_ratelimited("Attempting to checksum a non-TCP/UDP packet," " dropping a protocol %d packet\n", iph->protocol); goto out; } if (csum) *csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - iph->ihl*4, iph->protocol, 0); skb_probe_transport_header(skb, 0); err = 0; out: return err; } #endif /* __XEN_NETUTIL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/page.h000066400000000000000000000005331314037446600275040ustar00rootroot00000000000000#ifndef _XEN_PAGE_H #define _XEN_PAGE_H #include struct xen_memory_region { phys_addr_t start; phys_addr_t size; }; #define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */ extern __initdata struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS]; extern unsigned long xen_released_pages; #endif /* _XEN_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/pcifront.h000066400000000000000000000014321314037446600304130ustar00rootroot00000000000000/* * PCI Frontend - arch-dependendent declarations * * Author: Ryan Wilson */ #ifndef __XEN_ASM_PCIFRONT_H__ #define __XEN_ASM_PCIFRONT_H__ #ifdef __KERNEL__ #include int pci_frontend_enable_msi(struct pci_dev *, unsigned int nvec); void pci_frontend_disable_msi(struct pci_dev *); int pci_frontend_enable_msix(struct pci_dev *, struct msix_entry *, int nvec); void pci_frontend_disable_msix(struct pci_dev *); #ifdef __ia64__ #include extern void xen_add_resource(struct pci_controller *, unsigned int, unsigned int, struct acpi_resource *); extern void xen_pcibios_setup_root_windows(struct pci_bus *, struct pci_controller *); #endif /* __ia64__ */ #endif /* __KERNEL__ */ #endif /* __XEN_ASM_PCIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/pcpu.h000066400000000000000000000011121314037446600275310ustar00rootroot00000000000000#ifndef _XEN_SYSCTL_H #define _XEN_SYSCTL_H #include #include int register_pcpu_notifier(struct notifier_block *); void unregister_pcpu_notifier(struct notifier_block *); #ifdef CONFIG_X86 int __must_check rdmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 *l, u32 *h); int __must_check wrmsr_safe_on_pcpu(unsigned int pcpu, u32 msr_no, u32 l, u32 h); int __must_check rdmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs); int __must_check wrmsr_safe_regs_on_pcpu(unsigned int pcpu, u32 *regs); #endif #endif /* _XEN_SYSCTL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/platform_pci.h000066400000000000000000000040541314037446600312510ustar00rootroot00000000000000#ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0x0003 #define XEN_IOPORT_LINUX_DRVVER 0x0001 #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ 
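/*
 * Illustrative sketch (not part of the original header): pv-on-hvm guests
 * use the ports above for the classic emulated-device unplug handshake,
 * roughly:
 *
 *     if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL)
 *         return;                                    // no unplug support
 *     outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM);
 *     outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER);
 *     if (inw(XEN_IOPORT_MAGIC) == XEN_IOPORT_MAGIC_VAL)
 *         outw(XEN_UNPLUG_ALL, XEN_IOPORT_UNPLUG);   // or a subset, below
 *
 * XEN_IOPORT_PRODNUM and the XEN_UNPLUG_* masks are defined just below;
 * inw/outw/outl are the usual x86 port-I/O accessors.
 */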
#define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_UNPLUG_ALL_IDE_DISKS (1<<0) #define XEN_UNPLUG_ALL_NICS (1<<1) #define XEN_UNPLUG_AUX_IDE_DISKS (1<<2) #define XEN_UNPLUG_ALL (XEN_UNPLUG_ALL_IDE_DISKS|\ XEN_UNPLUG_ALL_NICS|\ XEN_UNPLUG_AUX_IDE_DISKS) #define XEN_UNPLUG_UNNECESSARY (1<<16) #define XEN_UNPLUG_NEVER (1<<17) static inline int xen_must_unplug_nics(void) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ defined(CONFIG_XEN_PVHVM) return 1; #else return 0; #endif } static inline int xen_must_unplug_disks(void) { #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ defined(CONFIG_XEN_PVHVM) return 1; #else return 0; #endif } extern int xen_platform_pci_unplug; #if defined(CONFIG_XEN_PVHVM) extern bool xen_has_pv_devices(void); extern bool xen_has_pv_disk_devices(void); extern bool xen_has_pv_nic_devices(void); extern bool xen_has_pv_and_legacy_disk_devices(void); #else static inline bool xen_has_pv_devices(void) { return IS_ENABLED(CONFIG_XEN); } static inline bool xen_has_pv_disk_devices(void) { return IS_ENABLED(CONFIG_XEN); } static inline bool xen_has_pv_nic_devices(void) { return IS_ENABLED(CONFIG_XEN); } static inline bool xen_has_pv_and_legacy_disk_devices(void) { return false; } #endif #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/swiotlb-xen.h000066400000000000000000000035241314037446600310460ustar00rootroot00000000000000#ifndef __LINUX_SWIOTLB_XEN_H #define __LINUX_SWIOTLB_XEN_H #include extern int xen_swiotlb_init(int verbose, bool early); extern void *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs); extern void xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs); extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); extern int xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs); extern void xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs); extern void xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir); extern void xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); extern void xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir); extern void xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); extern int xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); extern int xen_swiotlb_dma_supported(struct device *hwdev, u64 mask); #endif /* __LINUX_SWIOTLB_XEN_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/sysctl.h000066400000000000000000000002551314037446600301120ustar00rootroot00000000000000#ifndef _XEN_SYSCTL_H #define 
_XEN_SYSCTL_H /* CTL_XEN names: */ enum { CTL_XEN_INDEPENDENT_WALLCLOCK=1, CTL_XEN_PERMITTED_CLOCK_JITTER=2, }; #endif /* _XEN_SYSCTL_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/tmem.h000066400000000000000000000004661314037446600275370ustar00rootroot00000000000000#ifndef _XEN_TMEM_H #define _XEN_TMEM_H #include #ifdef CONFIG_XEN_TMEM_MODULE #define tmem_enabled true #else /* defined in drivers/xen/tmem.c */ extern bool tmem_enabled; #endif #ifdef CONFIG_XEN_SELFBALLOONING extern int xen_selfballoon_init(bool, bool); #endif #endif /* _XEN_TMEM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xen-ops.h000066400000000000000000000021321314037446600301560ustar00rootroot00000000000000#ifndef INCLUDE_XEN_OPS_H #define INCLUDE_XEN_OPS_H #include #include DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_arch_pre_suspend(void); void xen_arch_post_suspend(int suspend_cancelled); void xen_arch_hvm_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); int xen_setup_shutdown_event(void); extern unsigned long *xen_contiguous_bitmap; int xen_create_contiguous_region(unsigned long vstart, unsigned int order, unsigned int address_bits); void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order); struct vm_area_struct; int xen_remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t mfn, int nr, pgprot_t prot, unsigned domid, struct page **pages); int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, int numpgs, struct page **pages); bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); #endif /* INCLUDE_XEN_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xen.h000066400000000000000000000020661314037446600273650ustar00rootroot00000000000000#ifndef _XEN_XEN_H #define _XEN_XEN_H enum xen_domain_type { XEN_NATIVE, /* running on bare hardware */ XEN_PV_DOMAIN, /* running in a PV domain */ XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ }; #if defined(CONFIG_PARAVIRT_XEN) extern enum xen_domain_type xen_domain_type; #elif defined(CONFIG_XEN) #define xen_domain_type XEN_PV_DOMAIN #else #define xen_domain_type XEN_NATIVE #endif #define xen_domain() (xen_domain_type != XEN_NATIVE) #define xen_pv_domain() (xen_domain() && \ xen_domain_type == XEN_PV_DOMAIN) #define xen_hvm_domain() (xen_domain() && \ xen_domain_type == XEN_HVM_DOMAIN) #ifdef CONFIG_XEN_DOM0 #include #include #define xen_initial_domain() (xen_domain() && \ xen_start_info && xen_start_info->flags & SIF_INITDOMAIN) #elif defined(CONFIG_XEN) #define xen_initial_domain() is_initial_xendomain() #define xen_has_pv_devices() is_running_on_xen() #else /* !CONFIG_XEN_DOM0 */ #define xen_initial_domain() (0) #endif /* CONFIG_XEN_DOM0 */ #endif /* _XEN_XEN_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xen_proc.h000066400000000000000000000004431314037446600304050ustar00rootroot00000000000000 #ifndef __ASM_XEN_PROC_H__ #define __ASM_XEN_PROC_H__ #include struct proc_dir_entry *create_xen_proc_entry(const char *, mode_t, const struct file_operations *, void *data); void remove_xen_proc_entry(const char *); #endif /* __ASM_XEN_PROC_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xen_pvonhvm.h000066400000000000000000000010751314037446600311410ustar00rootroot00000000000000#ifndef _XEN_PVONHVM_H #define 
_XEN_PVONHVM_H #ifdef CONFIG_XEN #define xen_pvonhvm_unplug 0 #else extern int xen_pvonhvm_unplug; #endif #define XEN_PVONHVM_UNPLUG_IDE_DISKS (1 << 1) #define XEN_PVONHVM_UNPLUG_NICS (1 << 2) #define XEN_PVONHVM_UNPLUG_NEVER (1 << 3) #define XEN_PVONHVM_UNPLUG_ALL (XEN_PVONHVM_UNPLUG_IDE_DISKS | XEN_PVONHVM_UNPLUG_NICS) #define xen_pvonhvm_unplugged_disks (xen_pvonhvm_unplug & XEN_PVONHVM_UNPLUG_IDE_DISKS) #define xen_pvonhvm_unplugged_nics (xen_pvonhvm_unplug & XEN_PVONHVM_UNPLUG_NICS) #endif /* _XEN_PVONHVM_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xenbus.h000066400000000000000000000304141314037446600300750ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* See XBWF_ definitions below. */ unsigned long flags; #endif }; #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /* * Execute callback in its own kthread. Useful if the callback is long * running or heavily serialised, to avoid taking out the main xenwatch thread * for a long period of time (or even unwittingly causing a deadlock). */ #define XBWF_new_thread 1 #endif /* A xenbus device. 
*/ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) struct work_struct work; #endif }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. */ struct xenbus_driver { const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) int (*suspend_cancel)(struct xenbus_device *dev); #endif int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) # define XENBUS_DRIVER_SET_OWNER(mod) .driver.owner = mod, #else # define XENBUS_DRIVER_SET_OWNER(mod) #endif #define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \ struct xenbus_driver var ## _driver = { \ .driver.name = drvname + 0 ?: var ## _ids->devicetype, \ .driver.mod_name = KBUILD_MODNAME, \ XENBUS_DRIVER_SET_OWNER(THIS_MODULE) \ .ids = var ## _ids, ## methods \ } static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int __must_check xenbus_register_frontend(struct xenbus_driver *drv); int __must_check xenbus_register_backend(struct xenbus_driver *drv); void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ __scanf(4, 5) int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...); /* Single printf and write: returns -errno or 0. */ __printf(4, 5) int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. 
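 *
 * (Illustrative sketch, not part of the original text: a frontend commonly
 * gathers several backend keys in one call, e.g.:
 *
 *     unsigned long ring_ref;
 *     unsigned int evtchn;
 *     err = xenbus_gather(XBT_NIL, dev->otherend,
 *                         "ring-ref", "%lu", &ring_ref,
 *                         "event-channel", "%u", &evtchn,
 *                         NULL);
 *
 * 'dev' is the caller's xenbus_device; the key names depend on the
 * device protocol.)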
Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); /* Used by xenbus_dev to borrow kernel's store connection. */ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); struct work_struct; void xenbus_probe(struct work_struct *); /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) /** * Register a watch on the given path, using the given xenbus_watch structure * for storage, and the given callback function as the callback. Return 0 on * success, or -errno on error. On success, the given path will be saved as * watch->node, and remains the caller's to free. On error, watch->node will * be NULL, the device will switch to XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) /** * Register a watch on the given path/path2, using the given xenbus_watch * structure for storage, and the given callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (path/path2) will be saved as watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); #else __printf(4, 5) int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...); #endif /** * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); /** * Grant access to the given ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); int xenbus_multi_grant_ring(struct xenbus_device *, unsigned int nr, struct page *[], grant_ref_t []); /** * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. 
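 *
 * (Illustrative sketch, not part of the original text: a typical frontend
 * connect path chains the helpers above together along these lines:
 *
 *     err = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *     if (err < 0)
 *         goto fail;
 *     err = xenbus_alloc_evtchn(dev, &evtchn);
 *     if (err)
 *         goto fail;
 *     ... advertise ring-ref / event-channel in the store,
 *         then xenbus_switch_state(dev, XenbusStateInitialised); ...
 *
 * 'sring' is the shared ring page, 'evtchn' an int, and 'virt_to_mfn'
 * the classic-Xen virtual-to-machine-frame translation or equivalent.)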
If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, const grant_ref_t refs[], unsigned int nr_refs); #else int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr); #endif int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t gnt_ref, grant_handle_t *handle, void *vaddr); /** * Unmap a page of memory in this domain that was imported from another domain * and free the virtual address space. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *); #else int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); #endif int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. */ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port); /** * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path); /*** * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) __printf(3, 4); /*** * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) __printf(3, 4); #if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H) int xenbus_dev_init(void); #endif const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)); int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xenbus_dev.h000066400000000000000000000034661314037446600307420ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/xenbus_backend. 
* * Copyright (c) 2011 Bastian Blank * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_XEN_XENBUS_DEV_H__ #define __LINUX_XEN_XENBUS_DEV_H__ #include #define IOCTL_XENBUS_BACKEND_EVTCHN \ _IOC(_IOC_NONE, 'B', 0, 0) #define IOCTL_XENBUS_BACKEND_SETUP \ _IOC(_IOC_NONE, 'B', 1, 0) #endif /* __LINUX_XEN_XENBUS_DEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xencomm.h000066400000000000000000000045171314037446600302440ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. 
*/ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #define xencomm_map_no_alloc(ptr, bytes) \ ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xencons.h000066400000000000000000000005111314037446600302410ustar00rootroot00000000000000#ifndef __ASM_XENCONS_H__ #define __ASM_XENCONS_H__ int xprintk(const char *, ...) __attribute__ ((__format__(__printf__, 1, 2))); struct dom0_vga_console_info; void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t); void xencons_force_flush(void); void xencons_resume(void); #endif /* __ASM_XENCONS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/include/xen/xenoprof.h000066400000000000000000000025651314037446600304370ustar00rootroot00000000000000/****************************************************************************** * xen/xenoprof.h * * Copyright (c) 2006 Isaku Yamahata * VA Linux Systems Japan K.K. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifndef __XEN_XENOPROF_H__ #define __XEN_XENOPROF_H__ #ifdef CONFIG_XEN #include struct oprofile_operations; int xenoprofile_init(struct oprofile_operations * ops); void xenoprofile_exit(void); struct xenoprof_shared_buffer { char *buffer; struct xenoprof_arch_shared_buffer arch; }; #else #define xenoprofile_init(ops) (-ENOSYS) #define xenoprofile_exit() do { } while (0) #endif /* CONFIG_XEN */ #endif /* __XEN_XENOPROF_H__ */ uvp-classic_xen_driver-3.12.xto3.16.x-write_feature_flag.diff000066400000000000000000000043651314037446600372460ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.xdiff -ruNa uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/machine_reboot.c uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/machine_reboot.c --- uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/machine_reboot.c 2016-02-24 17:48:35.000000000 +0800 +++ uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/machine_reboot.c 2016-02-24 17:48:58.000000000 +0800 @@ -112,7 +112,6 @@ /* update uvp flags */ write_driver_resume_flag(); - write_feature_flag(); write_monitor_service_flag(); return 0; diff -ruNa uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/platform-pci.c uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.c --- uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/platform-pci.c 2016-02-24 17:48:35.000000000 +0800 +++ uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.c 2016-02-24 17:50:35.000000000 +0800 @@ -74,19 +74,6 @@ static unsigned long shared_info_frame; static uint64_t callback_via; -/*driver should write xenstore flag to tell xen which feature supported, add by w00205029 2012-01-17*/ -void write_feature_flag() -{ - int rc; - char str[32] = {0}; - - (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); - rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); - if (rc) { - printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); - } -} - /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { @@ -464,7 +451,6 @@ if ((ret = xen_panic_handler_init())) goto out; - write_feature_flag(); out: if (ret) { pci_release_region(pdev, 0); diff -ruNa uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/platform-pci.h uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.h --- uvp-classic_xen_driver-3.12.xto3.16.x.orig/xen-platform-pci/platform-pci.h 2016-02-24 17:48:35.000000000 +0800 +++ uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.h 2016-02-24 17:49:16.000000000 +0800 @@ -26,7 +26,6 @@ #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING -extern void write_feature_flag(void); extern void write_monitor_service_flag(void); extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/version.ini000066400000000000000000000000631314037446600263660ustar00rootroot00000000000000KernModeVersion=2.2.0.208 
UserModeVersion=2.2.0.202UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/000077500000000000000000000000001314037446600264175ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/Kbuild000066400000000000000000000002551314037446600275560ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-balloon-y := balloon.o sysfs.o xen-balloon-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/Makefile000066400000000000000000000000661314037446600300610ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/balloon.c000066400000000000000000000506631314037446600302230ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static DEFINE_MUTEX(balloon_mutex); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ DEFINE_SPINLOCK(balloon_lock); struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifndef OPENSUSE_1302 #ifndef CONFIG_XEN /* * In HVM guests accounting here uses the Xen visible values, but the kernel * determined totalram_pages value shouldn't get altered. 
Since totalram_pages * includes neither the kernel static image nor any memory allocated prior to * or from the bootmem allocator, we have to synchronize the two values. */ static unsigned long __read_mostly totalram_bias; #else #define totalram_bias 0 #endif #endif /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *unused); static DECLARE_WORK(balloon_worker, balloon_process); #ifndef OPENSUSE_1302 static struct timer_list balloon_timer; #endif /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) #define PAGE_TO_LIST(p) (&(p)->lru) #define LIST_TO_PAGE(l) list_entry((l), struct page, lru) #define UNLIST_PAGE(p) \ do { \ list_del(PAGE_TO_LIST(p)); \ PAGE_TO_LIST(p)->next = NULL; \ PAGE_TO_LIST(p)->prev = NULL; \ } while(0) #define IPRINTK(fmt, args...) pr_info("xen_mem: " fmt, ##args) #define WPRINTK(fmt, args...) pr_warning("xen_mem: " fmt, ##args) /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page, int account) { unsigned long pfn; /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_high++; } else { list_add(PAGE_TO_LIST(page), &ballooned_pages); bs.balloon_low++; } if (account) adjust_managed_page_count(page, -1); pfn = page_to_pfn(page); if (account) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); page_zone(page)->present_pages--; } else { BUG_ON(!PageReserved(page)); WARN_ON_ONCE(phys_to_machine_mapping_valid(pfn)); } } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ static struct page *balloon_retrieve(int *was_empty) { struct page *page; struct zone *zone; if (list_empty(&ballooned_pages)) return NULL; page = LIST_TO_PAGE(ballooned_pages.next); UNLIST_PAGE(page); BUG_ON(!PageReserved(page)); if (PageHighMem(page)) bs.balloon_high--; else bs.balloon_low--; adjust_managed_page_count(page, 1); zone = page_zone(page); *was_empty |= !populated_zone(zone); zone->present_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return LIST_TO_PAGE(ballooned_pages.next); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = PAGE_TO_LIST(page)->next; if (next == &ballooned_pages) return NULL; return LIST_TO_PAGE(next); } static inline void balloon_free_page(struct page *page) { #ifndef MODULE if (put_page_testzero(page)) free_hot_cold_page(page, 1); #else /* free_hot_cold_page() is not being exported. 
*/ __free_page(page); #endif } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static DEFINE_TIMER(balloon_timer, balloon_alarm, 0, 0); static unsigned long current_target(void) { unsigned long target = bs.target_pages; if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high)) target = bs.current_pages + bs.balloon_low + bs.balloon_high; return target; } unsigned long balloon_num_physpages(void) { unsigned int nid; unsigned long phys_pages = 0; for_each_online_node(nid) phys_pages += node_present_pages(nid); return phys_pages; } unsigned long balloon_minimum_target(void) { #ifndef CONFIG_XEN #define max_pfn balloon_num_physpages() #endif unsigned long min_pages, curr_pages = current_target(); #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) /* Simple continuous piecewiese linear function: * max MiB -> min MiB gradient * 0 0 * 16 16 * 32 24 * 128 72 (1/2) * 512 168 (1/4) * 2048 360 (1/8) * 8192 552 (1/32) * 32768 1320 * 131072 4392 */ if (max_pfn < MB2PAGES(128)) min_pages = MB2PAGES(8) + (max_pfn >> 1); else if (max_pfn < MB2PAGES(512)) min_pages = MB2PAGES(40) + (max_pfn >> 2); else if (max_pfn < MB2PAGES(2048)) min_pages = MB2PAGES(104) + (max_pfn >> 3); else min_pages = MB2PAGES(296) + (max_pfn >> 5); #undef MB2PAGES /* Don't enforce growth */ return min(min_pages, curr_pages); #ifndef CONFIG_XEN #undef max_pfn #endif } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; int need_zonelists_rebuild = 0; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(&need_zonelists_rebuild); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); balloon_free_page(page); } bs.current_pages += rc; out: balloon_unlock(flags); #ifndef MODULE setup_per_zone_wmarks(); if (rc > 0) kswapd_run(0); if (need_zonelists_rebuild) build_all_zonelists(NULL, NULL); else vm_total_pages = nr_free_pagecache_pages(); #endif return rc < 0 ? 
rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); xen_scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); xen_scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); balloon_append(pfn_to_page(pfn), 1); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); bs.current_pages -= nr_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static int flag; static void balloon_process(struct work_struct *unused) { int need_sleep = 0; long credit; char buffer[16]; mutex_lock(&balloon_mutex); if(current_target() == bs.current_pages) { mutex_unlock(&balloon_mutex); return; } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - bs.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != bs.current_pages || (flag==1)) { #ifdef OPENSUSE_1302 mod_timer(&balloon_timer, jiffies + HZ); #endif sprintf(buffer,"%lu",bs.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ bs.target_pages = max(target, balloon_minimum_target()); flag = bs.target_pages== target ? 0:1; schedule_work(&balloon_worker); } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
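 * With 4 KiB pages that is a shift by 2: for example, a memory/target value
 * of 1048576 KiB (1 GiB) becomes 262144 pages.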
*/ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) pr_err("Failed to set balloon watcher\n"); return NOTIFY_DONE; } #ifdef CONFIG_PROC_FS static ssize_t balloon_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char memstring[64], *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ if (count > sizeof(memstring)) return -EFBIG; /* too long */ if (copy_from_user(memstring, buffer, count)) return -EFAULT; memstring[sizeof(memstring)-1] = '\0'; target_bytes = memparse(memstring, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static int balloon_show(struct seq_file *m, void *v) { return seq_printf(m, "Current allocation: %8lu kB\n" "Requested target: %8lu kB\n" "Minimum target: %8lu kB\n" "Maximum target: %8lu kB\n" "Low-mem balloon: %8lu kB\n" "High-mem balloon: %8lu kB\n" "Driver pages: %8lu kB\n", PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages), PAGES2KB(balloon_minimum_target()), PAGES2KB(balloon_num_physpages()), PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high), PAGES2KB(bs.driver_pages)); } static int balloon_open(struct inode *inode, struct file *file) { return single_open(file, balloon_show, PDE_DATA(inode)); } static const struct file_operations balloon_fops = { .open = balloon_open, .llseek = seq_lseek, .read = seq_read, .write = balloon_write, .release = single_release }; #endif static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { #if !defined(CONFIG_XEN) # ifndef XENMEM_get_pod_target # define XENMEM_get_pod_target 17 typedef struct xen_pod_target { uint64_t target_pages; uint64_t tot_pages; uint64_t pod_cache_pages; uint64_t pod_entries; domid_t domid; } xen_pod_target_t; # endif xen_pod_target_t pod_target = { .domid = DOMID_SELF }; unsigned long num_physpages = balloon_num_physpages(); int rc; #elif defined(CONFIG_X86) unsigned long pfn; struct page *page; #endif if (!is_running_on_xen()) return -ENODEV; IPRINTK("Initialising balloon driver.\n"); #ifdef CONFIG_XEN bs.current_pages = min(xen_start_info->nr_pages, max_pfn); totalram_pages = bs.current_pages; #else rc = HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod_target); /* * Xen prior to 3.4.0 masks the memory_op command to 4 bits, thus * converting XENMEM_get_pod_target to XENMEM_decrease_reservation. * Fortunately this results in a request with all input fields zero, * but (due to the way bit 4 and upwards get interpreted) a starting * extent of 1. When start_extent > nr_extents (>= in newer Xen), we * simply get start_extent returned. */ bs.current_pages = pod_target.tot_pages + pod_target.pod_entries - pod_target.pod_cache_pages; if (rc || bs.current_pages > num_physpages) bs.current_pages = num_physpages; #endif bs.target_pages = bs.current_pages; bs.balloon_low = 0; bs.balloon_high = 0; bs.driver_pages = 0UL; #ifndef OPENSUSE_1302 init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; #endif #ifdef CONFIG_PROC_FS if (!create_xen_proc_entry("balloon", S_IFREG|S_IRUGO|S_IWUSR, &balloon_fops, NULL)) { WPRINTK("Unable to create /proc/xen/balloon.\n"); return -1; } #endif balloon_sysfs_init(); #if defined(CONFIG_X86) && defined(CONFIG_XEN) /* Initialise the balloon with excess memory space. 
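 * Pages between the initially populated range (xen_start_info->nr_pages) and
 * max_pfn start out on the balloon list so they can be populated on demand.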
*/ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) { SetPageReserved(page); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(page, 0); } } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void __exit balloon_exit(void) { balloon_sysfs_exit(); /* XXX - release balloon here */ } module_exit(balloon_exit); void balloon_update_driver_allowance(long delta) { unsigned long flags; balloon_lock(flags); bs.driver_pages += delta; balloon_unlock(flags); } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); #if IS_ENABLED(CONFIG_XEN_BACKEND) #ifdef CONFIG_XEN static int dealloc_pte_fn( pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long pfn, mfn = pte_mfn(*pte); int ret; struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &mfn); set_pte_at(&init_mm, addr, pte, __pte_ma(0)); pfn = __pa(addr) >> PAGE_SHIFT; set_phys_to_machine(pfn, INVALID_P2M_ENTRY); SetPageReserved(pfn_to_page(pfn)); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != 1); return 0; } #endif struct page **alloc_empty_pages_and_pagevec(int nr_pages) { unsigned long flags; void *v; struct page *page, **pagevec; int i, ret; pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL); if (pagevec == NULL) return NULL; for (i = 0; i < nr_pages; i++) { balloon_lock(flags); page = balloon_first_page(); if (page && !PageHighMem(page)) { UNLIST_PAGE(page); bs.balloon_low--; balloon_unlock(flags); pagevec[i] = page; continue; } balloon_unlock(flags); page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD); if (page == NULL) goto err; v = page_address(page); xen_scrub_pages(v, 1); balloon_lock(flags); if (xen_feature(XENFEAT_auto_translated_physmap)) { unsigned long gmfn = page_to_pfn(page); struct xen_memory_reservation reservation = { .nr_extents = 1, .extent_order = 0, .domid = DOMID_SELF }; set_xen_guest_handle(reservation.extent_start, &gmfn); ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); if (ret == 1) ret = 0; /* success */ } else { #ifdef CONFIG_XEN ret = apply_to_page_range(&init_mm, (unsigned long)v, PAGE_SIZE, dealloc_pte_fn, NULL); #else /* Cannot handle non-auto translate mode. 
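 * Without CONFIG_XEN there is no way to tear down the kernel mapping of the
 * page here, so report failure and let the caller unwind.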
*/ ret = 1; #endif } if (ret != 0) { balloon_free_page(page); balloon_unlock(flags); goto err; } --bs.current_pages; adjust_managed_page_count(page, -1); page_zone(page)->present_pages--; balloon_unlock(flags); } out: schedule_work(&balloon_worker); #ifdef CONFIG_XEN flush_tlb_all(); #endif return pagevec; err: balloon_lock(flags); while (--i >= 0) balloon_append(pagevec[i], 0); balloon_unlock(flags); kfree(pagevec); pagevec = NULL; goto out; } EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec); #endif /* CONFIG_XEN_BACKEND */ #ifdef CONFIG_XEN static void _free_empty_pages(struct page **pagevec, int nr_pages, bool account) { unsigned long flags; int i; if (pagevec == NULL) return; balloon_lock(flags); for (i = 0; i < nr_pages; i++) { BUG_ON(page_count(pagevec[i]) != 1); balloon_append(pagevec[i], account); } if (account) { bs.current_pages -= nr_pages; totalram_pages = bs.current_pages - totalram_bias; } balloon_unlock(flags); schedule_work(&balloon_worker); } void free_empty_pages(struct page **pagevec, int nr_pages) { _free_empty_pages(pagevec, nr_pages, true); } #endif #if IS_ENABLED(CONFIG_XEN_BACKEND) void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages) { if (pagevec) { _free_empty_pages(pagevec, nr_pages, false); kfree(pagevec); } } EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec); #endif void balloon_release_driver_page(struct page *page) { unsigned long flags; balloon_lock(flags); balloon_append(page, 1); bs.current_pages--; bs.driver_pages--; balloon_unlock(flags); schedule_work(&balloon_worker); } EXPORT_SYMBOL_GPL(balloon_release_driver_page); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/common.h000066400000000000000000000044471314037446600300710ustar00rootroot00000000000000/****************************************************************************** * balloon/common.h * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_BALLOON_COMMON_H__ #define __XEN_BALLOON_COMMON_H__ #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. 
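 * current_pages is what the guest holds right now; target_pages is the last
 * requested target, never below balloon_minimum_target().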
*/ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; extern struct balloon_stats balloon_stats; #define bs balloon_stats int balloon_sysfs_init(void); void balloon_sysfs_exit(void); void balloon_set_new_target(unsigned long target); unsigned long balloon_num_physpages(void); unsigned long balloon_minimum_target(void); #endif /* __XEN_BALLOON_COMMON_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/scrub.c000066400000000000000000000010521314037446600276770ustar00rootroot00000000000000#include #include #include void xen_scrub_pages(void *v, unsigned int count) { if (likely(cpu_has_xmm2)) { unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4); for (; n--; v += sizeof(long) * 4) asm("movnti %1,(%0)\n\t" "movnti %1,%c2(%0)\n\t" "movnti %1,2*%c2(%0)\n\t" "movnti %1,3*%c2(%0)\n\t" : : "r" (v), "r" (0L), "i" (sizeof(long)) : "memory"); asm volatile("sfence" : : : "memory"); } else for (; count--; v += PAGE_SIZE) clear_page(v); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-balloon/sysfs.c000066400000000000000000000127561314037446600277450ustar00rootroot00000000000000/****************************************************************************** * balloon/sysfs.c * * Xen balloon driver - sysfs interfaces. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "common.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define BALLOON_CLASS_NAME "xen_memory" #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages)); BALLOON_SHOW(min_kb, "%lu\n", PAGES2KB(balloon_minimum_target())); BALLOON_SHOW(max_kb, "%lu\n", PAGES2KB(balloon_num_physpages())); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages)); static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages)); } static ssize_t store_target_kb(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = simple_strtoull(buf, &endchar, 0) << 10; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (count <= 1) return -EBADMSG; /* runt */ target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static DEVICE_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct device_attribute *balloon_attrs[] = { &dev_attr_target_kb, &dev_attr_target, }; static struct attribute *balloon_info_attrs[] = { &dev_attr_current_kb.attr, &dev_attr_min_kb.attr, &dev_attr_max_kb.attr, &dev_attr_low_kb.attr, &dev_attr_high_kb.attr, &dev_attr_driver_kb.attr, NULL }; static const struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct bus_type balloon_subsys = { .name = BALLOON_CLASS_NAME, .dev_name = BALLOON_CLASS_NAME, }; static struct device balloon_dev; static int __init register_balloon(struct device *dev) { int i, error; error = subsys_system_register(&balloon_subsys, NULL); if (error) return error; dev->id = 0; dev->bus = &balloon_subsys; error = device_register(dev); if (error) { bus_unregister(&balloon_subsys); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = device_create_file(dev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&dev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) device_remove_file(dev, balloon_attrs[i]); device_unregister(dev); bus_unregister(&balloon_subsys); return error; } static __exit void unregister_balloon(struct device *dev) { int i; sysfs_remove_group(&dev->kobj, &balloon_info_group); for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) device_remove_file(dev, balloon_attrs[i]); device_unregister(dev); bus_unregister(&balloon_subsys); } int __init balloon_sysfs_init(void) { int rc = register_balloon(&balloon_dev); register_xen_selfballooning(&balloon_dev); return rc; } void __exit balloon_sysfs_exit(void) { unregister_balloon(&balloon_dev); } 
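/*
 * Usage note (illustrative, not part of the original file; the sysfs path
 * assumes the default "xen_memory" subsystem registered above): the balloon
 * target can be changed from inside the guest with, e.g.
 *   echo 524288 > /sys/devices/system/xen_memory/xen_memory0/target_kb
 * which requests 512 MiB, the same effect as the toolstack writing that
 * value (in KiB) to the xenstore memory/target node.
 */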
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/000077500000000000000000000000001314037446600273665ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.28/000077500000000000000000000000001314037446600302015ustar00rootroot00000000000000reboot.c000066400000000000000000000171171314037446600315670ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.28#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). 
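 * (If the previous state was SHUTDOWN_RESUMING, xen_suspend() schedules
 * shutdown_work itself once it observes the newly requested state.)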
*/ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { struct task_struct *taskp; #ifdef CONFIG_PM_SLEEP if (shutting_down == SHUTDOWN_SUSPEND) taskp = kthread_run(xen_suspend, NULL, "suspend"); else #endif taskp = kthread_run(shutdown_process, NULL, "shutdown"); if (IS_ERR(taskp)) { pr_warning("Error creating shutdown process (%ld): " "retrying...\n", -PTR_ERR(taskp)); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. */ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event 
channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xenbus_dev.c000066400000000000000000000274151314037446600324410ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.28/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #ifdef OPENSUSE_1302 #include #endif #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. 
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if (len > sizeof(u->u.buffer) - u->len) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = 
memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.node = kstrdup(path, GFP_KERNEL); watch->watch.callback = watch_fired; watch->token = kstrdup(token, GFP_KERNEL); watch->dev_data = u; err = watch->watch.node && watch->token ? register_xenbus_watch(&watch->watch) : -ENOMEM; if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { void __user *udata = (void __user 
*) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; #ifndef OPENSUSE_1302 int #ifndef MODULE __init #endif xenbus_dev_init(void) { if (!create_xen_proc_entry("xenbus", S_IFREG|S_IRUSR, &xenbus_dev_file_ops, NULL)) return -ENOMEM; return 0; } #else static struct miscdevice xenbus_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/xenbus", .fops = &xenbus_dev_file_ops, }; int #ifndef MODULE __init #endif xenbus_dev_init(void) { int err; err = misc_register(&xenbus_dev); if (err) pr_err("Could not register xenbus frontend device\n"); return err; } #endifxenbus_xs.c000066400000000000000000000603751314037446600323170ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.28/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif #define XS_LINUX_WEAK_WRITE 128 struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warn("xen store gave: unknown error %s\n", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } #ifdef OPENSUSE_1302 static bool xenbus_ok(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) switch (xen_store_domain_type) { case XS_LOCAL: #else if (atomic_read(&xenbus_xsd_state) == XENBUS_XSD_LOCAL_READY) #endif switch (system_state) { case SYSTEM_POWER_OFF: case SYSTEM_RESTART: case SYSTEM_HALT: return false; default: break; } return true; #if !defined(CONFIG_XEN) && !defined(MODULE) case XS_PV: case XS_HVM: /* FIXME: Could check that the remote domain is alive, * but it is normally initial domain. */ return true; default: break; } return false; #endif } #endif static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); #ifdef OPENSUSE_1302 if (xenbus_ok()) /* XXX FIXME: Avoid synchronous wait for response here. 
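 * In this build the wait below is bounded to 500 ms so the loop can re-check
 * xenbus_ok() on each pass and fail with -EIO once the system is halting or
 * rebooting, rather than sleeping forever on a dead xenstored.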
*/ wait_event_timeout(xs_state.reply_waitq, !list_empty(&xs_state.reply_list), msecs_to_jiffies(500)); else { /* * If we are in the process of being shut-down there is * no point of trying to contact XenBus - it is either * killed (xenstored application) or the other domain * has been killed or is unreachable. */ return ERR_PTR(-EIO); } #else /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); #endif spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } #endif void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { pr_warn_ratelimited("unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. 
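 * xs_error() below frees any real acknowledgement and returns 0, or passes an
 * ERR_PTR reply straight through as its negative errno.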
*/ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). 
*/ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. */ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* * Certain older XenBus toolstack cannot handle reading values that are * not populated. 
Some Xen 3.4 installation are incapable of doing this * so if we are running on anything older than 4 do not attempt to read * control/platform-feature-xs_reset_watches. */ static inline bool xen_strict_xenbus_quirk(void) { #if !defined(CONFIG_XEN) && defined(CONFIG_X86) uint32_t eax, ebx, ecx, edx, base; base = xen_cpuid_base(); cpuid(base + 1, &eax, &ebx, &ecx, &edx); if ((eax >> 16) < 4) return true; #endif return false; } static void xs_reset_watches(void) { #if defined(CONFIG_PARAVIRT_XEN) || defined(MODULE) int err, supported = 0; #ifdef CONFIG_PARAVIRT_XEN if (!xen_hvm_domain() || xen_initial_domain()) return; #endif if (xen_strict_xenbus_quirk()) return; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warn("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warn("Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. 
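 * Every registered watch is re-announced to xenstored below; the token is the
 * watch pointer printed in hex, matching the scheme in register_xenbus_watch().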
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { #ifdef OPENSUSE_1302 wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); #else wait_event_interruptible(watch_events_waitq, (kgr_task_safe(current), !list_empty(&watch_events))); #endif if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
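 * Drop response_mutex and wait for data again; a partial read across a
 * save/restore would leave this reader out of sync with xenstored.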
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warn("error %d while reading message\n", err); if (kthread_should_stop()) break; } return 0; } int #ifndef MODULE __init #endif xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.49/000077500000000000000000000000001314037446600302045ustar00rootroot00000000000000reboot.c000066400000000000000000000171521314037446600315710ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.49#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? 
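 * Filled in by setup_shutdown_watcher() from the xenstore key
 * control/platform-feature-multiprocessor-suspend.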
*/ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? */ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { struct task_struct *taskp; #ifdef CONFIG_PM_SLEEP if (shutting_down == SHUTDOWN_SUSPEND) taskp = kthread_run(xen_suspend, NULL, "suspend"); else #endif taskp = kthread_run(shutdown_process, NULL, "shutdown"); if (IS_ERR(taskp)) { pr_warning("Error creating shutdown process (%ld): " "retrying...\n", -PTR_ERR(taskp)); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
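 * XENBUS_IS_ERR_READ() covers both cases; the transaction is simply aborted
 * and the handler returns without changing the shutdown state.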
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ xenbus_dev.c000066400000000000000000000267001314037446600324400ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.49/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. 
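 * Replies and watch events destined for this open file are queued as struct
 * read_buffer entries and drained by xenbus_dev_read() under reply_mutex.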
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if (len > sizeof(u->u.buffer) - u->len) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = 
memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.node = kstrdup(path, GFP_KERNEL); watch->watch.callback = watch_fired; watch->token = kstrdup(token, GFP_KERNEL); watch->dev_data = u; err = watch->watch.node && watch->token ? register_xenbus_watch(&watch->watch) : -ENOMEM; if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { if (u->u.msg.type == XS_ERROR) kfree(trans); else { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } } else if (u->u.msg.type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long xenbus_dev_ioctl(struct file *file, unsigned int 
cmd, unsigned long data) { void __user *udata = (void __user *) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; int #ifndef MODULE __init #endif xenbus_dev_init(void) { if (!create_xen_proc_entry("xenbus", S_IFREG|S_IRUSR, &xenbus_dev_file_ops, NULL)) return -ENOMEM; return 0; } xenbus_xs.c000066400000000000000000000560031314037446600323130ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/3.12.49/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif #define XS_LINUX_WEAK_WRITE 128 struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. 
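 * (hdr.type selects the union member: XS_WATCH_EVENT messages fill the watch
 * branch, every other reply uses this body pointer.)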
*/ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. */ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warn("xen store gave: unknown error %s\n", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); /* XXX FIXME: Avoid synchronous wait for response here. 
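 * This copy of the file blocks here unconditionally; the 3.12.28 variant
 * earlier in the tree can bound the wait (OPENSUSE_1302 builds) and bail out
 * with -EIO on shutdown.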
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } #endif void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { pr_warn_ratelimited("unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. 
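 * Returns ERR_PTR(-ENOMEM) if the kasprintf() allocation fails.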
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
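 * transaction_start() takes transaction_mutex briefly while bumping
 * transaction_count, so no new transaction can begin while a suspend, which
 * holds that mutex, is in progress.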
*/ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* * Certain older XenBus toolstack cannot handle reading values that are * not populated. Some Xen 3.4 installation are incapable of doing this * so if we are running on anything older than 4 do not attempt to read * control/platform-feature-xs_reset_watches. 
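 * The hypervisor major version is read from CPUID leaf base+1 (eax >> 16) on
 * x86; anything below 4 takes the quirk path.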
*/ static inline bool xen_strict_xenbus_quirk(void) { #if !defined(CONFIG_XEN) && defined(CONFIG_X86) uint32_t eax, ebx, ecx, edx, base; base = xen_cpuid_base(); cpuid(base + 1, &eax, &ebx, &ecx, &edx); if ((eax >> 16) < 4) return true; #endif return false; } static void xs_reset_watches(void) { #if defined(CONFIG_PARAVIRT_XEN) || defined(MODULE) int err, supported = 0; #ifdef CONFIG_PARAVIRT_XEN if (!xen_hvm_domain() || xen_initial_domain()) return; #endif if (xen_strict_xenbus_quirk()) return; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warn("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warn("Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. 
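 *
 * On resume the loop below simply re-issues XS_WATCH for every watch still
 * on the list, reusing the pointer-as-hex-token convention established in
 * register_xenbus_watch().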
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { wait_event_interruptible(watch_events_waitq, ({ kgr_task_safe(current); !list_empty(&watch_events); })); if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
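 * Drop the response mutex and go back to waiting for data to arrive.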
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warn("error %d while reading message\n", err); if (kthread_should_stop()) break; } return 0; } int #ifndef MODULE __init #endif xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/Kbuild000066400000000000000000000013171314037446600305250ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-platform-pci.o EXTRA_CFLAGS += -I$(M)/xen-platform-pci xen-platform-pci-objs := evtchn.o platform-pci.o gnttab.o xen_support.o xen-platform-pci-objs += features.o platform-compat.o xen-platform-pci-objs += reboot.o machine_reboot.o xen-platform-pci-objs += panic-handler.o xen-platform-pci-objs += platform-pci-unplug.o xen-platform-pci-objs += xenbus_comms.o xen-platform-pci-objs += xenbus_xs.o xen-platform-pci-objs += xenbus_probe.o xen-platform-pci-objs += xenbus_dev.o xen-platform-pci-objs += xenbus_client.o xen-platform-pci-objs += xen_proc.o # Can we do better ? 
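# (The conditional below only adds the ia64-specific xencomm support objects;
# x86 builds use just the objects listed above. A typical out-of-tree build
# of this Kbuild -- assuming matching kernel headers are installed -- would
# be something like:
#   make -C /lib/modules/`uname -r`/build M=$PWD modules )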
ifeq ($(ARCH),ia64) xen-platform-pci-objs += xencomm.o xencomm_arch.o xcom_hcall.o xcom_asm.o endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/Makefile000066400000000000000000000000661314037446600310300ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/evtchn.c000066400000000000000000000213451314037446600310260ustar00rootroot00000000000000/****************************************************************************** * evtchn.c * * A simplified event channel for para-drivers in unmodified linux * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, Intel Corporation * * This file may be distributed separately from the Linux kernel, or * incorporated into other software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif void *shared_info_area; #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) static struct { spinlock_t lock; irq_handler_t handler; void *dev_id; int evtchn; int close:1; /* close on unbind_from_irqhandler()? */ int inuse:1; int in_handler:1; } irq_evtchn[256]; static int evtchn_to_irq[NR_EVENT_CHANNELS] = { [0 ... 
NR_EVENT_CHANNELS-1] = -1 }; static DEFINE_SPINLOCK(irq_alloc_lock); static int alloc_xen_irq(void) { static int warned; int irq; spin_lock(&irq_alloc_lock); for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) { if (irq_evtchn[irq].inuse) continue; irq_evtchn[irq].inuse = 1; spin_unlock(&irq_alloc_lock); return irq; } if (!warned) { warned = 1; printk(KERN_WARNING "No available IRQ to bind to: " "increase irq_evtchn[] size in evtchn.c.\n"); } spin_unlock(&irq_alloc_lock); return -ENOSPC; } static void free_xen_irq(int irq) { spin_lock(&irq_alloc_lock); irq_evtchn[irq].inuse = 0; spin_unlock(&irq_alloc_lock); } int irq_to_evtchn_port(int irq) { return irq_evtchn[irq].evtchn; } EXPORT_SYMBOL(irq_to_evtchn_port); void mask_evtchn(int port) { shared_info_t *s = shared_info_area; synch_set_bit(port, &s->evtchn_mask[0]); } EXPORT_SYMBOL(mask_evtchn); void unmask_evtchn(int port) { evtchn_unmask_t op = { .port = port }; VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op)); } EXPORT_SYMBOL(unmask_evtchn); int bind_listening_port_to_irqhandler( unsigned int remote_domain, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { struct evtchn_alloc_unbound alloc_unbound; int err, irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_domain; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) { spin_unlock_irq(&irq_evtchn[irq].lock); free_xen_irq(irq); return err; } irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = alloc_unbound.port; irq_evtchn[irq].close = 1; evtchn_to_irq[alloc_unbound.port] = irq; unmask_evtchn(alloc_unbound.port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_listening_port_to_irqhandler); int bind_caller_port_to_irqhandler( unsigned int caller_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq; irq = alloc_xen_irq(); if (irq < 0) return irq; spin_lock_irq(&irq_evtchn[irq].lock); irq_evtchn[irq].handler = handler; irq_evtchn[irq].dev_id = dev_id; irq_evtchn[irq].evtchn = caller_port; irq_evtchn[irq].close = 0; evtchn_to_irq[caller_port] = irq; unmask_evtchn(caller_port); spin_unlock_irq(&irq_evtchn[irq].lock); return irq; } EXPORT_SYMBOL(bind_caller_port_to_irqhandler); void unbind_from_irqhandler(unsigned int irq, void *dev_id) { int evtchn; spin_lock_irq(&irq_evtchn[irq].lock); evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) { evtchn_to_irq[evtchn] = -1; mask_evtchn(evtchn); if (irq_evtchn[irq].close) { struct evtchn_close close = { .port = evtchn }; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close)) BUG(); } } irq_evtchn[irq].handler = NULL; irq_evtchn[irq].evtchn = 0; spin_unlock_irq(&irq_evtchn[irq].lock); while (irq_evtchn[irq].in_handler) cpu_relax(); free_xen_irq(irq); } EXPORT_SYMBOL(unbind_from_irqhandler); void notify_remote_via_irq(int irq) { int evtchn; evtchn = evtchn_from_irq(irq); if (is_valid_evtchn(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL(notify_remote_via_irq); static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 }; static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 }; static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh, unsigned int idx) { return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } static irqreturn_t evtchn_interrupt(int irq, void *dev_id #if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) , struct pt_regs *regs #else # define handler(irq, dev_id, regs) handler(irq, dev_id) #endif ) { unsigned int l1i, l2i, port; unsigned long masked_l1, masked_l2; /* XXX: All events are bound to vcpu0 but irq may be redirected. */ int cpu = 0; /*smp_processor_id();*/ irq_handler_t handler; shared_info_t *s = shared_info_area; vcpu_info_t *v = &s->vcpu_info[cpu]; unsigned long l1, l2; v->evtchn_upcall_pending = 0; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ /* Clear master flag /before/ clearing selector flag. */ wmb(); #endif l1 = xchg(&v->evtchn_pending_sel, 0); l1i = per_cpu(last_processed_l1i, cpu); l2i = per_cpu(last_processed_l2i, cpu); while (l1 != 0) { l1i = (l1i + 1) % BITS_PER_LONG; masked_l1 = l1 & ((~0UL) << l1i); if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */ l1i = BITS_PER_LONG - 1; l2i = BITS_PER_LONG - 1; continue; } l1i = __ffs(masked_l1); do { l2 = active_evtchns(cpu, s, l1i); l2i = (l2i + 1) % BITS_PER_LONG; masked_l2 = l2 & ((~0UL) << l2i); if (masked_l2 == 0) { /* if we masked out all events, move on */ l2i = BITS_PER_LONG - 1; break; } l2i = __ffs(masked_l2); /* process port */ port = (l1i * BITS_PER_LONG) + l2i; synch_clear_bit(port, &s->evtchn_pending[0]); irq = evtchn_to_irq[port]; if (irq < 0) continue; spin_lock(&irq_evtchn[irq].lock); handler = irq_evtchn[irq].handler; dev_id = irq_evtchn[irq].dev_id; if (unlikely(handler == NULL)) { printk("Xen IRQ%d (port %d) has no handler!\n", irq, port); spin_unlock(&irq_evtchn[irq].lock); continue; } irq_evtchn[irq].in_handler = 1; spin_unlock(&irq_evtchn[irq].lock); local_irq_enable(); handler(irq, irq_evtchn[irq].dev_id, regs); local_irq_disable(); spin_lock(&irq_evtchn[irq].lock); irq_evtchn[irq].in_handler = 0; spin_unlock(&irq_evtchn[irq].lock); /* if this is the final port processed, we'll pick up here+1 next time */ per_cpu(last_processed_l1i, cpu) = l1i; per_cpu(last_processed_l2i, cpu) = l2i; } while (l2i != BITS_PER_LONG - 1); l2 = active_evtchns(cpu, s, l1i); if (l2 == 0) /* we handled all ports, so we can clear the selector bit */ l1 &= ~(1UL << l1i); } return IRQ_HANDLED; } void irq_resume(void) { int evtchn, irq; for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) { mask_evtchn(evtchn); evtchn_to_irq[evtchn] = -1; } for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) irq_evtchn[irq].evtchn = 0; } int xen_irq_init(struct pci_dev *pdev) { int irq; for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++) spin_lock_init(&irq_evtchn[irq].lock); return request_irq(pdev->irq, evtchn_interrupt, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT, #else IRQF_SHARED | #ifdef IRQF_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM | #endif IRQF_DISABLED, #endif "xen-platform-pci", pdev); }UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/features.c000066400000000000000000000016041314037446600313510ustar00rootroot00000000000000/****************************************************************************** * features.c * * Xen feature flags. * * Copyright (c) 2006, Ian Campbell, XenSource Inc. 
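 *
 * Usage note: xen_setup_features() below fills in xen_features[]; other code
 * in this tree then tests individual flags, e.g.
 * xen_feature(XENFEAT_auto_translated_physmap) in gnttab.c.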
*/ #include #include #include #ifdef CONFIG_PARAVIRT_XEN #include #else #include #endif #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include #include #include u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly; EXPORT_SYMBOL(xen_features); void xen_setup_features(void) { struct xen_feature_info fi; int i, j; for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) { fi.submap_idx = i; if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) xen_features[i * 32 + j] = !!(fi.submap & 1< #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* External tools reserve first few grant table entries. */ #define NR_RESERVED_ENTRIES 8 #define GNTTAB_LIST_END 0xffffffff #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_v1_t)) static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); static struct grant_entry_v1 *shared; static struct gnttab_free_callback *gnttab_free_callback_list; static int gnttab_expand(unsigned int req_entries); #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP]) #define nr_freelist_frames(grant_frames) \ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP) static int get_free_entries(int count) { unsigned long flags; int ref, rc; grant_ref_t head; spin_lock_irqsave(&gnttab_list_lock, flags); if ((gnttab_free_count < count) && ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { spin_unlock_irqrestore(&gnttab_list_lock, flags); return rc; } ref = head = gnttab_free_head; gnttab_free_count -= count; while (count-- > 1) head = gnttab_entry(head); gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } #define get_free_entry() get_free_entries(1) static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; callback = gnttab_free_callback_list; gnttab_free_callback_list = NULL; while (callback != NULL) { next = callback->next; if (gnttab_free_count >= callback->count) { callback->next = NULL; callback->queued = 0; callback->fn(callback->arg); } else { callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; } callback = next; } } static inline void check_free_callbacks(void) { if (unlikely(gnttab_free_callback_list)) do_free_callbacks(); } static void put_free_entry(grant_ref_t ref) { unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; gnttab_free_count++; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } /* * Public grant-issuing interface functions */ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int flags) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); shared[ref].flags = GTF_permit_access | flags; return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int flags) { shared[ref].frame = frame; shared[ref].domid = domid; wmb(); BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing)); 
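	/*
	 * The wmb() above orders the frame/domid stores before the flags
	 * store below: the entry only becomes valid to the granted domain
	 * once GTF_permit_access is written.
	 */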
shared[ref].flags = GTF_permit_access | flags; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); int gnttab_query_foreign_access(grant_ref_t ref) { u16 nflags; nflags = shared[ref].flags; return (nflags & (GTF_reading|GTF_writing)); } EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref) { u16 flags, nflags; nflags = shared[ref].flags; do { if ((flags = nflags) & (GTF_reading|GTF_writing)) return 0; } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); return 1; } int gnttab_end_foreign_access_ref(grant_ref_t ref) { if (_gnttab_end_foreign_access_ref(ref)) return 1; printk(KERN_DEBUG "WARNING: g.e. %#x still in use!\n", ref); return 0; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); struct deferred_entry { struct list_head list; grant_ref_t ref; uint16_t warn_delay; struct page *page; }; static LIST_HEAD(deferred_list); static void gnttab_handle_deferred(unsigned long); static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); static void gnttab_handle_deferred(unsigned long unused) { unsigned int nr = 10; struct deferred_entry *first = NULL; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); while (nr--) { struct deferred_entry *entry = list_first_entry(&deferred_list, struct deferred_entry, list); if (entry == first) break; list_del(&entry->list); spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref)) { put_free_entry(entry->ref); if (entry->page) { printk(KERN_DEBUG "freeing g.e. %#x (pfn %#lx)\n", entry->ref, page_to_pfn(entry->page)); __free_page(entry->page); } else printk(KERN_DEBUG "freeing g.e. %#x\n", entry->ref); kfree(entry); entry = NULL; } else { if (!--entry->warn_delay) pr_info("g.e. %#x still pending\n", entry->ref); if (!first) first = entry; } spin_lock_irqsave(&gnttab_list_lock, flags); if (entry) list_add_tail(&entry->list, &deferred_list); else if (list_empty(&deferred_list)) break; } if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); } static void gnttab_add_deferred(grant_ref_t ref, struct page *page) { struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); const char *what = KERN_WARNING "leaking"; if (entry) { unsigned long flags; entry->ref = ref; entry->page = page; entry->warn_delay = 60; spin_lock_irqsave(&gnttab_list_lock, flags); list_add_tail(&entry->list, &deferred_list); if (!timer_pending(&deferred_timer)) { deferred_timer.expires = jiffies + HZ; add_timer(&deferred_timer); } spin_unlock_irqrestore(&gnttab_list_lock, flags); what = KERN_DEBUG "deferring"; } printk("%s g.e. %#x (pfn %lx)\n", what, ref, page ? page_to_pfn(page) : -1); } void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) { if (gnttab_end_foreign_access_ref(ref)) { put_free_entry(ref); if (page != 0) free_page(page); } else gnttab_add_deferred(ref, page ? 
virt_to_page(page) : NULL); } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); void gnttab_multi_end_foreign_access(unsigned int nr, grant_ref_t refs[], struct page *pages[]) { for (; nr--; ++refs) { if (*refs != GRANT_INVALID_REF) { if (gnttab_end_foreign_access_ref(*refs)) put_free_entry(*refs); else if (pages) { gnttab_add_deferred(*refs, *pages); *pages = NULL; } else gnttab_add_deferred(*refs, NULL); *refs = GRANT_INVALID_REF; } if (pages) { if (*pages) { __free_page(*pages); *pages = NULL; } ++pages; } } } EXPORT_SYMBOL_GPL(gnttab_multi_end_foreign_access); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) { int ref; if (unlikely((ref = get_free_entry()) < 0)) return -ENOSPC; gnttab_grant_foreign_transfer_ref(ref, domid, pfn); return ref; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) { shared[ref].frame = pfn; shared[ref].domid = domid; wmb(); shared[ref].flags = GTF_accept_transfer; } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { unsigned long frame; u16 flags; /* * If a transfer is not even yet started, try to reclaim the grant * reference and return failure (== 0). */ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) { if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags) return 0; cpu_relax(); } /* If a transfer is in progress then wait until it is completed. */ while (!(flags & GTF_transfer_completed)) { flags = shared[ref].flags; cpu_relax(); } /* Read the frame number /after/ reading completion status. */ rmb(); frame = shared[ref].frame; BUG_ON(frame == 0); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) { unsigned long frame = gnttab_end_foreign_transfer_ref(ref); put_free_entry(ref); return frame; } EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); void gnttab_free_grant_reference(grant_ref_t ref) { put_free_entry(ref); } EXPORT_SYMBOL_GPL(gnttab_free_grant_reference); void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; int count = 1; if (head == GNTTAB_LIST_END) return; spin_lock_irqsave(&gnttab_list_lock, flags); ref = head; while (gnttab_entry(ref) != GNTTAB_LIST_END) { ref = gnttab_entry(ref); count++; } gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = head; gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); if (h < 0) return -ENOSPC; *head = h; return 0; } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); } EXPORT_SYMBOL_GPL(gnttab_empty_grant_references); int gnttab_claim_grant_reference(grant_ref_t *private_head) { grant_ref_t g = *private_head; if (unlikely(g == GNTTAB_LIST_END)) return -ENOSPC; *private_head = gnttab_entry(g); return g; } EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release) { gnttab_entry(release) = *private_head; *private_head = release; } EXPORT_SYMBOL_GPL(gnttab_release_grant_reference); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; 
spin_lock_irqsave(&gnttab_list_lock, flags); if (callback->queued) goto out; callback->fn = fn; callback->arg = arg; callback->count = count; callback->queued = 1; callback->next = gnttab_free_callback_list; gnttab_free_callback_list = callback; check_free_callbacks(); out: spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_request_free_callback); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback) { struct gnttab_free_callback **pcb; unsigned long flags; spin_lock_irqsave(&gnttab_list_lock, flags); for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) { if (*pcb == callback) { *pcb = callback->next; callback->queued = 0; break; } } spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback); static int grow_gnttab_list(unsigned int more_frames) { unsigned int new_nr_grant_frames, extra_entries, i; unsigned int nr_glist_frames, new_nr_glist_frames; new_nr_grant_frames = nr_grant_frames + more_frames; extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME; nr_glist_frames = nr_freelist_frames(nr_grant_frames); new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames); for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); if (!gnttab_list[i]) goto grow_nomem; } for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(i) = gnttab_free_head; gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames; gnttab_free_count += extra_entries; nr_grant_frames = new_nr_grant_frames; check_free_callbacks(); return 0; grow_nomem: for ( ; i >= nr_glist_frames; i--) free_page((unsigned long) gnttab_list[i]); return -ENOMEM; } static unsigned int __max_nr_grant_frames(void) { struct gnttab_query_size query; int rc; query.dom = DOMID_SELF; rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1); if ((rc < 0) || (query.status != GNTST_okay)) return 4; /* Legacy max supported number of frames */ return query.max_nr_frames; } static inline unsigned int max_nr_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); if (xen_max > boot_max_nr_grant_frames) return boot_max_nr_grant_frames; return xen_max; } #ifdef CONFIG_XEN #ifdef CONFIG_X86 static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } #ifdef CONFIG_PM_SLEEP static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } #endif void *arch_gnttab_alloc_shared(xen_pfn_t *frames) { struct vm_struct *area; area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames(), NULL); BUG_ON(area == NULL); return area->addr; } #endif /* CONFIG_X86 */ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; xen_pfn_t *frames; unsigned int nr_gframes = end_idx + 1; int rc; frames = kmalloc(nr_gframes * sizeof(*frames), GFP_ATOMIC); if (!frames) return -ENOMEM; setup.dom = DOMID_SELF; setup.nr_frames = nr_gframes; set_xen_guest_handle(setup.frame_list, frames); rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1); if (rc == -ENOSYS) { kfree(frames); return -ENOSYS; } BUG_ON(rc || setup.status != GNTST_okay); if (shared == NULL) shared = arch_gnttab_alloc_shared(frames); #ifdef 
CONFIG_X86 rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); BUG_ON(rc); frames -= nr_gframes; /* adjust after map_pte_fn() */ #endif /* CONFIG_X86 */ kfree(frames); return 0; } #if IS_ENABLED(CONFIG_XEN_BACKEND) static DEFINE_SEQLOCK(gnttab_dma_lock); static void gnttab_page_free(struct page *page, unsigned int order) { BUG_ON(order); ClearPageForeign(page); gnttab_reset_grant_page(page); ClearPageReserved(page); put_page(page); } /* * Must not be called with IRQs off. This should only be used on the * slow path. * * Copy a foreign granted page to local memory. */ int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep) { struct gnttab_unmap_and_replace unmap; struct page *page, *new_page; void *addr, *new_addr; unsigned long pfn; maddr_t new_mfn; int err; page = *pagep; if (!get_page_unless_zero(page)) return -ENOENT; err = -ENOMEM; new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!new_page) goto out; new_addr = page_address(new_page); addr = page_address(page); copy_page(new_addr, addr); pfn = page_to_pfn(page); new_mfn = virt_to_mfn(new_addr); write_seqlock_bh(&gnttab_dma_lock); /* Make seq visible before checking page_mapped. */ smp_mb(); /* Has the page been DMA-mapped? */ if (unlikely(page_mapped(page))) { write_sequnlock_bh(&gnttab_dma_lock); put_page(new_page); err = -EBUSY; goto out; } gnttab_set_replace_op(&unmap, (unsigned long)addr, (unsigned long)new_addr, ref); if (!xen_feature(XENFEAT_auto_translated_physmap)) { multicall_entry_t mc[2]; mmu_update_t mmu; set_phys_to_machine(pfn, new_mfn); set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY); MULTI_grant_table_op(&mc[0], GNTTABOP_unmap_and_replace, &unmap, 1); mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu.val = pfn; MULTI_mmu_update(&mc[1], &mmu, 1, NULL, DOMID_SELF); err = HYPERVISOR_multicall_check(mc, 2, NULL); } else err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace, &unmap, 1); BUG_ON(err); BUG_ON(unmap.status != GNTST_okay); write_sequnlock_bh(&gnttab_dma_lock); new_page->mapping = page->mapping; new_page->index = page->index; set_bit(PG_foreign, &new_page->flags); if (PageReserved(page)) SetPageReserved(new_page); *pagep = new_page; SetPageForeign(page, gnttab_page_free); page->mapping = NULL; out: put_page(page); return err; } EXPORT_SYMBOL_GPL(gnttab_copy_grant_page); void gnttab_reset_grant_page(struct page *page) { init_page_count(page); page_mapcount_reset(page); } EXPORT_SYMBOL_GPL(gnttab_reset_grant_page); /* * Keep track of foreign pages marked as PageForeign so that we don't * return them to the remote domain prematurely. * * PageForeign pages are pinned down by increasing their mapcount. * * All other pages are simply returned as is. */ void __gnttab_dma_map_page(struct page *page) { unsigned int seq; if (!is_running_on_xen() || !PageForeign(page)) return; BUG_ON(PageCompound(page)); do { seq = read_seqbegin(&gnttab_dma_lock); if (gnttab_dma_local_pfn(page)) break; atomic_set(&page->_mapcount, 0); /* Make _mapcount visible before read_seqretry. 
*/ smp_mb(); } while (unlikely(read_seqretry(&gnttab_dma_lock, seq))); } #endif /* CONFIG_XEN_BACKEND */ #ifdef __HAVE_ARCH_PTE_SPECIAL static unsigned int GNTMAP_pte_special; bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; if (unlikely(cmd != GNTTABOP_map_grant_ref)) count = 0; for (i = 0; i < count; ++i, ++map) { if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; if (GNTMAP_pte_special) map->flags |= GNTMAP_pte_special; else { BUG_ON(xen_feature(XENFEAT_auto_translated_physmap)); return true; } } return false; } EXPORT_SYMBOL(gnttab_pre_map_adjust); #if CONFIG_XEN_COMPAT < 0x030400 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count) { unsigned int i; int rc = 0; for (i = 0; i < count && rc == 0; ++i, ++map) { pte_t pte; if (!(map->flags & GNTMAP_host_map) || !(map->flags & GNTMAP_application_map)) continue; #ifdef CONFIG_X86 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_SPECIAL) & __supported_pte_mask); #else #error Architecture not yet supported. #endif if (!(map->flags & GNTMAP_readonly)) pte = pte_mkwrite(pte); if (map->flags & GNTMAP_contains_pte) { mmu_update_t u; u.ptr = map->host_addr; u.val = __pte_val(pte); rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); } else rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0); } return rc; } EXPORT_SYMBOL(gnttab_post_map_adjust); #endif #endif /* __HAVE_ARCH_PTE_SPECIAL */ int gnttab_resume(void) { if (max_nr_grant_frames() < nr_grant_frames) return 0; return gnttab_map(0, nr_grant_frames - 1); } #ifdef CONFIG_PM_SLEEP #include #ifdef CONFIG_X86 static int gnttab_suspend(void) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_grant_frames, unmap_pte_fn, NULL); return 0; } #else #define gnttab_suspend NULL #endif static void _gnttab_resume(void) { if (gnttab_resume()) BUG(); } static struct syscore_ops gnttab_syscore_ops = { .resume = _gnttab_resume, .suspend = gnttab_suspend, }; #endif #else /* !CONFIG_XEN */ #include static unsigned long resume_frames; static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct xen_add_to_physmap xatp; unsigned int i = end_idx; /* Loop backwards, so that the first hypercall has the largest index, * ensuring that the table will grow only once. 
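 *
 * Each pass below maps one grant-table frame into the reserved MMIO region
 * via XENMEM_add_to_physmap, using space == XENMAPSPACE_grant_table and
 * gpfn == (resume_frames >> PAGE_SHIFT) + i.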
*/ do { xatp.domid = DOMID_SELF; xatp.idx = i; xatp.space = XENMAPSPACE_grant_table; xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); } while (i-- > start_idx); return 0; } int gnttab_resume(void) { unsigned int max_nr_gframes, nr_gframes; nr_gframes = nr_grant_frames; max_nr_gframes = max_nr_grant_frames(); if (max_nr_gframes < nr_gframes) return -ENOSYS; if (!resume_frames) { resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes); if (shared == NULL) { pr_warning("error to ioremap gnttab share frames\n"); return -1; } } gnttab_map(0, nr_gframes - 1); return 0; } #endif /* !CONFIG_XEN */ static int gnttab_expand(unsigned int req_entries) { int rc; unsigned int cur, extra; cur = nr_grant_frames; extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) / ENTRIES_PER_GRANT_FRAME); if (cur + extra > max_nr_grant_frames()) return -ENOSPC; if ((rc = gnttab_map(cur, cur + extra - 1)) == 0) rc = grow_gnttab_list(extra); return rc; } #ifdef CONFIG_XEN static int __init #else int #endif gnttab_init(void) { int i, ret; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; if (!is_running_on_xen()) return -ENODEV; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. */ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames); gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), GFP_KERNEL); if (gnttab_list == NULL) return -ENOMEM; nr_glist_frames = nr_freelist_frames(nr_grant_frames); for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); if (gnttab_list[i] == NULL) { ret = -ENOMEM; goto ini_nomem; } } if (gnttab_resume() < 0) { ret = -ENODEV; goto ini_nomem; } nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME; for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) gnttab_entry(i) = i + 1; gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; gnttab_free_head = NR_RESERVED_ENTRIES; #if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL) if (!xen_feature(XENFEAT_auto_translated_physmap) && xen_feature(XENFEAT_gnttab_map_avail_bits)) { #ifdef CONFIG_X86 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0))) >> _PAGE_BIT_SPECIAL) << _GNTMAP_guest_avail0; #else #error Architecture not yet supported. #endif } #endif #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) if (!is_initial_xendomain()) register_syscore_ops(&gnttab_syscore_ops); #endif return 0; ini_nomem: for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); return ret; } #ifdef CONFIG_XEN core_initcall(gnttab_init); #endif machine_reboot.c000066400000000000000000000042541314037446600324360ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci#include #include #include #include #include #include "platform-pci.h" #include struct ap_suspend_info { int do_spin; atomic_t nr_spinning; }; #ifdef CONFIG_SMP /* * Spinning prevents, for example, APs touching grant table entries while * the shared grant table is not mapped into the address space imemdiately * after resume. 
*/ static void ap_suspend(void *_info) { struct ap_suspend_info *info = _info; BUG_ON(!irqs_disabled()); atomic_inc(&info->nr_spinning); mb(); while (info->do_spin) cpu_relax(); mb(); atomic_dec(&info->nr_spinning); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0, 0) #else #define initiate_ap_suspend(i) smp_call_function(ap_suspend, i, 0) #endif #else /* !defined(CONFIG_SMP) */ #define initiate_ap_suspend(i) 0 #endif static int bp_suspend(void) { int suspend_cancelled; BUG_ON(!irqs_disabled()); suspend_cancelled = HYPERVISOR_suspend(0); if (!suspend_cancelled) { platform_pci_resume(); gnttab_resume(); irq_resume(); } return suspend_cancelled; } int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)) { int err, suspend_cancelled, nr_cpus; struct ap_suspend_info info; xenbus_suspend(); preempt_disable(); /* Prevent any races with evtchn_interrupt() handler. */ disable_irq(xen_platform_pdev->irq); info.do_spin = 1; atomic_set(&info.nr_spinning, 0); smp_mb(); nr_cpus = num_online_cpus() - 1; err = initiate_ap_suspend(&info); if (err < 0) { preempt_enable(); xenbus_suspend_cancel(); return err; } while (atomic_read(&info.nr_spinning) != nr_cpus) cpu_relax(); local_irq_disable(); suspend_cancelled = bp_suspend(); resume_notifier(suspend_cancelled); local_irq_enable(); smp_mb(); info.do_spin = 0; while (atomic_read(&info.nr_spinning) != 0) cpu_relax(); enable_irq(xen_platform_pdev->irq); preempt_enable(); if (!suspend_cancelled) xenbus_resume(); else xenbus_suspend_cancel(); /* update uvp flags */ write_driver_resume_flag(); write_feature_flag(); write_monitor_service_flag(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/panic-handler.c000066400000000000000000000016521314037446600322430ustar00rootroot00000000000000#include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif MODULE_LICENSE("GPL"); #ifdef __ia64__ static void xen_panic_hypercall(struct unw_frame_info *info, void *arg) { current->thread.ksp = (__u64)info->sw - 16; HYPERVISOR_shutdown(SHUTDOWN_crash); /* we're never actually going to get here... */ } #endif static int xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #ifdef __ia64__ unw_init_running(xen_panic_hypercall, NULL); #else /* !__ia64__ */ HYPERVISOR_shutdown(SHUTDOWN_crash); #endif /* we're never actually going to get here... 
*/ return NOTIFY_DONE; } static struct notifier_block xen_panic_block = { .notifier_call = xen_panic_event }; int xen_panic_handler_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); return 0; } platform-compat.c000066400000000000000000000067611314037446600325720ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci#include #include #include #include #include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) static int system_state = 1; EXPORT_SYMBOL(system_state); #endif void ctrl_alt_del(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) kill_proc(1, SIGINT, 1); /* interrupt init */ #else kill_cad_pid(SIGINT, 1); #endif } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) size_t strcspn(const char *s, const char *reject) { const char *p; const char *r; size_t count = 0; for (p = s; *p != '\0'; ++p) { for (r = reject; *r != '\0'; ++r) { if (*p == *r) return count; } ++count; } return count; } EXPORT_SYMBOL(strcspn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(void * vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout) { might_sleep(); spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); goto out; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; out: spin_unlock_irq(&x->wait.lock); return timeout; } EXPORT_SYMBOL(wait_for_completion_timeout); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) /* fake do_exit using complete_and_exit */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) asmlinkage NORET_TYPE void do_exit(long code) #else fastcall NORET_TYPE void do_exit(long code) #endif { complete_and_exit(NULL, code); } EXPORT_SYMBOL_GPL(do_exit); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) signed long schedule_timeout_interruptible(signed long timeout) { __set_current_state(TASK_INTERRUPTIBLE); return schedule_timeout(timeout); } EXPORT_SYMBOL(schedule_timeout_interruptible); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. */ void *kzalloc(size_t size, int flags) { void *ret = kmalloc(size, flags); if (ret) memset(ret, 0, size); return ret; } EXPORT_SYMBOL(kzalloc); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) /* Simplified asprintf. */ char *kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; unsigned int len; char *p, dummy[1]; va_start(ap, fmt); len = vsnprintf(dummy, 0, fmt, ap); va_end(ap); p = kmalloc(len + 1, gfp); if (!p) return NULL; va_start(ap, fmt); vsprintf(p, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf); #endif platform-pci-unplug.c000066400000000000000000000117421314037446600333650ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/****************************************************************************** * platform-pci-unplug.c * * Xen platform PCI device driver * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include /****************************************************************************** * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 #define UNPLUG_IGNORE 8 int xen_platform_pci; EXPORT_SYMBOL_GPL(xen_platform_pci); static int xen_emul_unplug; void xen_unplug_emulated_devices(void) { /* If the version matches enable the Xen platform PCI driver. 
* Also enable the Xen platform PCI driver if the version is really old * and the user told us to ignore it. */ xen_platform_pci = 1; xen_emul_unplug |= UNPLUG_ALL_NICS; xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; /* Set the default value of xen_emul_unplug depending on whether or * not the Xen PV frontends and the Xen platform PCI driver have * been compiled for this kernel (modules or built-in are both OK). */ if (xen_platform_pci && !xen_emul_unplug) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Netfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated ICs.\n"); xen_emul_unplug |= UNPLUG_ALL_NICS; #endif #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) printk(KERN_INFO "Blkfront and the Xen platform PCI driver have " "been compiled for this kernel: unplug emulated disks.\n" "You might have to change the root device\n" "from /dev/hd[a-d] to /dev/xvd[a-d]\n" "in your root= kernel command line option\n"); xen_emul_unplug |= UNPLUG_ALL_IDE_DISKS; #endif } /* Now unplug the emulated devices */ if (xen_platform_pci && !(xen_emul_unplug & UNPLUG_IGNORE)) outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.c000066400000000000000000000310031314037446600321240ustar00rootroot00000000000000/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __ia64__ #include #endif #include "platform-pci.h" #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define DRV_NAME "xen-platform-pci" #define DRV_VERSION "0.10" #define DRV_RELDATE "03/03/2005" static int max_hypercall_stub_pages, nr_hypercall_stub_pages; char *hypercall_stubs; EXPORT_SYMBOL(hypercall_stubs); MODULE_AUTHOR("ssmith@xensource.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); /* NB. [aux-]ide-disks options do not unplug IDE CD-ROM drives. */ /* NB. aux-ide-disks is equiv to ide-disks except ignores primary master. 
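 *
 * Hypothetical load-time example of the dev_unplug parameter declared just
 * below:
 *   modprobe xen-platform-pci dev_unplug=ide-disks,nics
 * Note that check_platform_magic() later overwrites dev_unplug with "never"
 * before parsing it, so any user-supplied value is ignored in this tree.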
*/ static char *dev_unplug; module_param(dev_unplug, charp, 0644); MODULE_PARM_DESC(dev_unplug, "Emulated devices to unplug: " "[all,][ide-disks,][aux-ide-disks,][nics,][never] (default is 'all')\n"); struct pci_dev *xen_platform_pdev; static unsigned long shared_info_frame; static uint64_t callback_via; /* driver should write xenstore flag to tell xen which feature supported */ void write_feature_flag() { int rc; char str[32] = {0}; (void)snprintf(str, sizeof(str), "%d", SUPPORTED_FEATURE); rc = xenbus_write(XBT_NIL, "control/uvp", "feature_flag", str); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/feature_flag failed \n"); } } /*2012-7-9 write monitor-service-flag is true*/ void write_monitor_service_flag() { int rc; rc = xenbus_write(XBT_NIL, "control/uvp", "monitor-service-flag", "true"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/monitor-service-flag failed \n"); } } void write_driver_resume_flag() { int rc = 0; rc = xenbus_write(XBT_NIL, "control/uvp", "driver-resume-flag", "1"); if (rc) { printk(KERN_INFO DRV_NAME "write control/uvp/driver-resume-flag failed \n"); } } static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; extern void *shared_info_area; #ifdef __ia64__ xencomm_initialize(); #endif setup_xen_features(); shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); shared_info_area = ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); return 0; } static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } #ifndef __ia64__ /*#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) static uint32_t xen_cpuid_base(void) { uint32_t base, eax, ebx, ecx, edx; char signature[13]; for (base = 0x40000000; base < 0x40010000; base += 0x100) { cpuid(base, &eax, &ebx, &ecx, &edx); *(uint32_t*)(signature + 0) = ebx; *(uint32_t*)(signature + 4) = ecx; *(uint32_t*)(signature + 8) = edx; signature[12] = 0; if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2)) return base; } return 0; } #endif*/ static int init_hypercall_stubs(void) { uint32_t eax, ebx, ecx, edx, pages, msr, i, base; base = xen_cpuid_base(); if (base == 0) { printk(KERN_WARNING "Detected Xen platform device but not Xen VMM?\n"); return -EINVAL; } cpuid(base + 1, &eax, &ebx, &ecx, &edx); printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff); /* * Find largest supported number of hypercall pages. * We'll create as many as possible up to this number. */ cpuid(base + 2, &pages, &msr, &ecx, &edx); /* * Use __vmalloc() because vmalloc_exec() is not an exported symbol. * PAGE_KERNEL_EXEC also is not exported, hence we use PAGE_KERNEL. 
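 * (The loop that follows instead retries __vmalloc() with one fewer page
 * each time until an executable mapping for the hypercall stubs succeeds;
 * each page is then registered with the hypervisor via wrmsrl().)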
* hypercall_stubs = vmalloc_exec(pages * PAGE_SIZE); */ while (pages > 0) { hypercall_stubs = __vmalloc( pages * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM, __pgprot(__PAGE_KERNEL & ~_PAGE_NX)); if (hypercall_stubs != NULL) break; pages--; /* vmalloc failed: try one fewer pages */ } if (hypercall_stubs == NULL) return -ENOMEM; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; max_hypercall_stub_pages = pages; printk(KERN_INFO "Hypercall area is %u pages.\n", pages); return 0; } static void resume_hypercall_stubs(void) { uint32_t base, ecx, edx, pages, msr, i; base = xen_cpuid_base(); BUG_ON(base == 0); cpuid(base + 2, &pages, &msr, &ecx, &edx); if (pages > max_hypercall_stub_pages) pages = max_hypercall_stub_pages; for (i = 0; i < pages; i++) { unsigned long pfn; pfn = vmalloc_to_pfn((char *)hypercall_stubs + i*PAGE_SIZE); wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i); } nr_hypercall_stub_pages = pages; } #else /* __ia64__ */ #define init_hypercall_stubs() (0) #define resume_hypercall_stubs() ((void)0) #endif static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; #ifdef __ia64__ for (irq = 0; irq < 16; irq++) { if (isa_irq_to_vector(irq) == pdev->irq) return irq; /* ISA IRQ */ } #else /* !__ia64__ */ irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) pin = pdev->pin; #else pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); #endif /* We don't know the GSI. Specify the PCI INTx line instead. */ return (((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3)); } static int set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } int xen_irq_init(struct pci_dev *pdev); int xenbus_init(void); int xen_reboot_init(void); int xen_panic_handler_init(void); int gnttab_init(void); #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0xffff /* NB: register a proper one */ #define XEN_IOPORT_LINUX_DRVVER ((LINUX_VERSION_CODE << 8) + 0x0) #define UNPLUG_ALL_IDE_DISKS 1 #define UNPLUG_ALL_NICS 2 #define UNPLUG_AUX_IDE_DISKS 4 #define UNPLUG_ALL 7 static int check_platform_magic(struct device *dev, long ioaddr, long iolen) { short magic, unplug = 0; char protocol, *p, *q, *err; dev_unplug = "never"; /* Unconditionally unplug everything */ if (!dev_unplug) unplug = UNPLUG_ALL; for (p = dev_unplug; p; p = q) { q = strchr(dev_unplug, ','); if (q) *q++ = '\0'; if (!strcmp(p, "all")) unplug |= UNPLUG_ALL; else if (!strcmp(p, "ide-disks")) unplug |= UNPLUG_ALL_IDE_DISKS; else if (!strcmp(p, "aux-ide-disks")) unplug |= UNPLUG_AUX_IDE_DISKS; else if (!strcmp(p, "nics")) unplug |= UNPLUG_ALL_NICS; else 
if (!strcmp(p, "never")) unplug = 0; else dev_warn(dev, "unrecognised option '%s' " "in module parameter 'dev_unplug'\n", p); } if (iolen < 0x16) { err = "backend too old"; goto no_dev; } magic = inw(XEN_IOPORT_MAGIC); if (magic != XEN_IOPORT_MAGIC_VAL) { err = "unrecognised magic value"; goto no_dev; } protocol = inb(XEN_IOPORT_PROTOVER); dev_info(dev, "I/O protocol version %d\n", protocol); switch (protocol) { case 1: outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM); outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER); if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) { dev_err(dev, "blacklisted by host\n"); return -ENODEV; } /* Fall through */ case 0: outw(unplug, XEN_IOPORT_UNPLUG); break; default: err = "unknown I/O protocol version"; goto no_dev; } return 0; no_dev: dev_warn(dev, "failed backend handshake: %s\n", err); if (!unplug) return 0; dev_err(dev, "failed to execute specified dev_unplug options!\n"); return -ENODEV; } void xen_unplug_emulated_devices(void); static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr, iolen; long mmio_addr, mmio_len; xen_unplug_emulated_devices(); if (xen_platform_pdev) return -EBUSY; xen_platform_pdev = pdev; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); iolen = pci_resource_len(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); callback_via = get_callback_via(pdev); if (mmio_addr == 0 || ioaddr == 0 || callback_via == 0) { printk(KERN_WARNING DRV_NAME ":no resources found\n"); return -ENOENT; } ret = pci_request_region(pdev, 1, DRV_NAME); if (ret < 0) return ret; ret = pci_request_region(pdev, 0, DRV_NAME); if (ret < 0) goto mem_out; platform_mmio = mmio_addr; platform_mmiolen = mmio_len; ret = init_hypercall_stubs(); if (ret < 0) goto out; ret = check_platform_magic(&pdev->dev, ioaddr, iolen); if (ret < 0) goto out; if ((ret = init_xen_info())) goto out; if ((ret = gnttab_init())) goto out; if ((ret = xen_irq_init(pdev))) goto out; if ((ret = set_callback_via(callback_via))) goto out; if ((ret = xenbus_init())) goto out; if ((ret = xen_reboot_init())) goto out; if ((ret = xen_panic_handler_init())) goto out; write_feature_flag(); out: if (ret) { pci_release_region(pdev, 0); mem_out: pci_release_region(pdev, 1); } return ret; } #define XEN_PLATFORM_VENDOR_ID 0x5853 #define XEN_PLATFORM_DEVICE_ID 0x0001 static struct pci_device_id platform_pci_tbl[] __devinitdata = { {XEN_PLATFORM_VENDOR_ID, XEN_PLATFORM_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* Continue to recognise the old ID for now */ {0xfffd, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { name: DRV_NAME, probe: platform_pci_init, id_table: platform_pci_tbl, }; static int pci_device_registered; void platform_pci_resume(void) { struct xen_add_to_physmap xatp; resume_hypercall_stubs(); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; xatp.gpfn = shared_info_frame; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); if (set_callback_via(callback_via)) printk("platform_pci_resume failure!\n"); } static int __init platform_pci_module_init(void) { int rc; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) rc = pci_module_init(&platform_driver); #else rc = pci_register_driver(&platform_driver); #endif if (rc) { printk(KERN_INFO DRV_NAME ": No platform pci device model found\n"); return rc; } pci_device_registered = 1; return 0; } 
module_init(platform_pci_module_init); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/platform-pci.h000066400000000000000000000024751314037446600321440ustar00rootroot00000000000000/****************************************************************************** * platform-pci.h * * Xen platform PCI device driver * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. */ #ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #include #define XENPAGING 0x1 #define SUPPORTED_FEATURE XENPAGING extern void write_feature_flag(void); extern void write_monitor_service_flag(void); extern void write_driver_resume_flag(void); unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); extern struct pci_dev *xen_platform_pdev; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/reboot.c000066400000000000000000000171171314037446600310330ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #undef handle_sysrq #endif #define SHUTDOWN_INVALID -1 #define SHUTDOWN_POWEROFF 0 #define SHUTDOWN_SUSPEND 2 #define SHUTDOWN_RESUMING 3 #define SHUTDOWN_HALT 4 /* Ignore multiple shutdown requests. */ static int shutting_down = SHUTDOWN_INVALID; /* Can we leave APs online when we suspend? */ static int fast_suspend; static void __shutdown_handler(struct work_struct *unused); static DECLARE_DELAYED_WORK(shutdown_work, __shutdown_handler); int __xen_suspend(int fast_suspend, void (*resume_notifier)(int)); static int shutdown_process(void *__unused) { static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *poweroff_argv[] = { "/sbin/poweroff", NULL }; extern asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void *arg); if ((shutting_down == SHUTDOWN_POWEROFF) || (shutting_down == SHUTDOWN_HALT)) { if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) { #ifdef CONFIG_XEN sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, NULL); #endif /* CONFIG_XEN */ } } shutting_down = SHUTDOWN_INVALID; /* could try again */ return 0; } #ifdef CONFIG_PM_SLEEP static int setup_suspend_evtchn(void); /* Was last suspend request cancelled? 
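   Set from xen_resume_notifier() and consumed by xen_suspend(): when the
   suspend was cancelled the domain never actually left, so the existing
   suspend event channel is still valid and setup_suspend_evtchn() is skipped.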
*/ static int suspend_cancelled; static void xen_resume_notifier(int _suspend_cancelled) { int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING); BUG_ON(old_state != SHUTDOWN_SUSPEND); suspend_cancelled = _suspend_cancelled; } static int xen_suspend(void *__unused) { int err, old_state; err = set_cpus_allowed_ptr(current, cpumask_of(0)); if (err) { pr_err("Xen suspend can't run on CPU0 (%d)\n", err); goto fail; } do { err = __xen_suspend(fast_suspend, xen_resume_notifier); if (err) { pr_err("Xen suspend failed (%d)\n", err); goto fail; } if (!suspend_cancelled) setup_suspend_evtchn(); old_state = cmpxchg( &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID); } while (old_state == SHUTDOWN_SUSPEND); switch (old_state) { case SHUTDOWN_INVALID: case SHUTDOWN_SUSPEND: BUG(); case SHUTDOWN_RESUMING: break; default: schedule_delayed_work(&shutdown_work, 0); break; } return 0; fail: old_state = xchg(&shutting_down, SHUTDOWN_INVALID); BUG_ON(old_state != SHUTDOWN_SUSPEND); return 0; } #endif static void switch_shutdown_state(int new_state) { int prev_state, old_state = SHUTDOWN_INVALID; /* We only drive shutdown_state into an active state. */ if (new_state == SHUTDOWN_INVALID) return; do { /* We drop this transition if already in an active state. */ if ((old_state != SHUTDOWN_INVALID) && (old_state != SHUTDOWN_RESUMING)) return; /* Attempt to transition. */ prev_state = old_state; old_state = cmpxchg(&shutting_down, old_state, new_state); } while (old_state != prev_state); /* Either we kick off the work, or we leave it to xen_suspend(). */ if (old_state == SHUTDOWN_INVALID) schedule_delayed_work(&shutdown_work, 0); else BUG_ON(old_state != SHUTDOWN_RESUMING); } static void __shutdown_handler(struct work_struct *unused) { struct task_struct *taskp; #ifdef CONFIG_PM_SLEEP if (shutting_down == SHUTDOWN_SUSPEND) taskp = kthread_run(xen_suspend, NULL, "suspend"); else #endif taskp = kthread_run(shutdown_process, NULL, "shutdown"); if (IS_ERR(taskp)) { pr_warning("Error creating shutdown process (%ld): " "retrying...\n", -PTR_ERR(taskp)); schedule_delayed_work(&shutdown_work, HZ/2); } } static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { extern void ctrl_alt_del(void); char *str; struct xenbus_transaction xbt; int err, new_state = SHUTDOWN_INVALID; if ((shutting_down != SHUTDOWN_INVALID) && (shutting_down != SHUTDOWN_RESUMING)) return; again: err = xenbus_transaction_start(&xbt); if (err) return; str = (char *)xenbus_read(xbt, "control", "shutdown", NULL); /* Ignore read errors and empty reads. 
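   An unreadable or empty control/shutdown node simply means no request is
   pending, so the transaction is ended and the handler returns without doing
   anything.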
*/ if (XENBUS_IS_ERR_READ(str)) { xenbus_transaction_end(xbt, 1); return; } xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { kfree(str); goto again; } if (strcmp(str, "poweroff") == 0) new_state = SHUTDOWN_POWEROFF; else if (strcmp(str, "reboot") == 0) ctrl_alt_del(); #ifdef CONFIG_PM_SLEEP else if (strcmp(str, "suspend") == 0) new_state = SHUTDOWN_SUSPEND; #endif else if (strcmp(str, "halt") == 0) new_state = SHUTDOWN_HALT; else pr_warning("Ignoring shutdown request: %s\n", str); switch_shutdown_state(new_state); kfree(str); } static void sysrq_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) { char sysrq_key = '\0'; struct xenbus_transaction xbt; int err; again: err = xenbus_transaction_start(&xbt); if (err) return; if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) <= 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; } if (sysrq_key != '\0') xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; #ifdef CONFIG_MAGIC_SYSRQ if (sysrq_key != '\0') handle_sysrq(sysrq_key); #endif } static struct xenbus_watch shutdown_watch = { .node = "control/shutdown", .callback = shutdown_handler }; static struct xenbus_watch sysrq_watch = { .node = "control/sysrq", .callback = sysrq_handler }; #ifdef CONFIG_PM_SLEEP static irqreturn_t suspend_int(int irq, void* dev_id) { switch_shutdown_state(SHUTDOWN_SUSPEND); return IRQ_HANDLED; } static int setup_suspend_evtchn(void) { static int irq; int port; char portstr[16]; if (irq > 0) unbind_from_irqhandler(irq, NULL); irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend", NULL); if (irq <= 0) return -1; port = irq_to_evtchn_port(irq); pr_info("suspend: event channel %d\n", port); sprintf(portstr, "%d", port); xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr); return 0; } #else #define setup_suspend_evtchn() 0 #endif static int setup_shutdown_watcher(void) { int err; err = register_xenbus_watch(&sysrq_watch); if (err) { pr_err("Failed to set sysrq watcher\n"); return err; } if (is_initial_xendomain()) return 0; xenbus_scanf(XBT_NIL, "control", "platform-feature-multiprocessor-suspend", "%d", &fast_suspend); err = register_xenbus_watch(&shutdown_watch); if (err) { pr_err("Failed to set shutdown watcher\n"); return err; } /* suspend event channel */ err = setup_suspend_evtchn(); if (err) { pr_err("Failed to register suspend event channel\n"); return err; } return 0; } #ifdef CONFIG_XEN static int shutdown_event(struct notifier_block *notifier, unsigned long event, void *data) { setup_shutdown_watcher(); return NOTIFY_DONE; } static int __init setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(setup_shutdown_event); #else /* !defined(CONFIG_XEN) */ int xen_reboot_init(void) { return setup_shutdown_watcher(); } #endif /* !defined(CONFIG_XEN) */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xen_proc.c000066400000000000000000000012551314037446600313520ustar00rootroot00000000000000#include #include #include static struct proc_dir_entry *xen_base; struct proc_dir_entry * #ifndef MODULE __init #endif create_xen_proc_entry(const char *name, mode_t mode, const struct file_operations *fops, void *data) { if ( xen_base == NULL ) if ( (xen_base = proc_mkdir("xen", 
NULL)) == NULL ) panic("Couldn't create /proc/xen"); return proc_create_data(name, mode, xen_base, fops, data); } #ifdef MODULE #include EXPORT_SYMBOL_GPL(create_xen_proc_entry); #elif defined(CONFIG_XEN_PRIVILEGED_GUEST) void remove_xen_proc_entry(const char *name) { remove_proc_entry(name, xen_base); } #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xen_support.c000066400000000000000000000040531314037446600321220ustar00rootroot00000000000000/****************************************************************************** * support.c * Xen module support functions. * Copyright (C) 2004, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include #include #include #include #include #include #include "platform-pci.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if defined (__ia64__) unsigned long __hypercall(unsigned long a1, unsigned long a2, unsigned long a3, unsigned long a4, unsigned long a5, unsigned long cmd) { unsigned long __res; __asm__ __volatile__ (";;\n" "mov r2=%1\n" "break 0x1000 ;;\n" "mov %0=r8 ;;\n" : "=r"(__res) : "r"(cmd) : "r2", "r8", "memory"); return __res; } EXPORT_SYMBOL(__hypercall); int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) { return xencomm_hypercall_grant_table_op(cmd, uop, count); } EXPORT_SYMBOL(HYPERVISOR_grant_table_op); /* without using balloon driver on PV-on-HVM for ia64 */ void balloon_update_driver_allowance(long delta) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_update_driver_allowance); void balloon_release_driver_page(struct page *page) { /* nothing */ } EXPORT_SYMBOL_GPL(balloon_release_driver_page); #endif /* __ia64__ */ void xen_machphys_update(unsigned long mfn, unsigned long pfn) { BUG(); } EXPORT_SYMBOL(xen_machphys_update); xenbus_backend_client.c000066400000000000000000000102421314037446600337630ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/****************************************************************************** * Backend-client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code in the backend * driver. 
* * Copyright (C) 2005-2006 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include static int unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area, unsigned int nr, grant_handle_t handles[]) { unsigned int i; int err = 0; for (i = 0; i < nr; ++i) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, handles[i]); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) continue; xenbus_dev_error(dev, op.status, "unmapping page %u (handle %#x)", i, handles[i]); err = -EINVAL; } if (!err) { free_vm_area(area); kfree(handles); } return err; } /* Based on Rusty Russell's skeleton driver's map_page */ struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, const grant_ref_t refs[], unsigned int nr) { grant_handle_t *handles = kmalloc(nr * sizeof(*handles), GFP_KERNEL); struct vm_struct *area; unsigned int i; if (!handles) return ERR_PTR(-ENOMEM); area = alloc_vm_area(nr * PAGE_SIZE, NULL); if (!area) { kfree(handles); return ERR_PTR(-ENOMEM); } for (i = 0; i < nr; ++i) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)area->addr + i * PAGE_SIZE, GNTMAP_host_map, refs[i], dev->otherend_id); gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &op); if (op.status == GNTST_okay) { handles[i] = op.handle; continue; } unmap_ring_vfree(dev, area, i, handles); xenbus_dev_fatal(dev, op.status, "mapping page %u (ref %#x, dom%d)", i, refs[i], dev->otherend_id); BUG_ON(!IS_ERR(ERR_PTR(op.status))); return ERR_PTR(-EINVAL); } /* Stuff the handle array in an unused field. 
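   struct vm_struct has no private-data pointer, so the kmalloc()'ed
   grant-handle array is parked in area->phys_addr; xenbus_unmap_ring_vfree()
   below fetches it back and hands it to unmap_ring_vfree().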
*/ area->phys_addr = (unsigned long)handles; return area; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /* Based on Rusty Russell's skeleton driver's unmap_page */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area) { return unmap_ring_vfree(dev, area, get_vm_area_size(area) >> PAGE_SHIFT, (void *)(unsigned long)area->phys_addr); } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_client.c000066400000000000000000000502111314037446600323730ustar00rootroot00000000000000/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #include #include #include #include #include #include #include #include #endif #include #include #if defined(CONFIG_PARAVIRT_XEN) #include "xenbus_probe.h" struct xenbus_map_node { struct list_head next; union { struct vm_struct *area; /* PV */ struct page *page; /* HVM */ }; grant_handle_t handle; }; static DEFINE_SPINLOCK(xenbus_valloc_lock); static LIST_HEAD(xenbus_valloc_pages); struct xenbus_ring_ops { int (*map)(struct xenbus_device *dev, grant_ref_t gnt, void **vaddr); int (*unmap)(struct xenbus_device *dev, void *vaddr); }; static const struct xenbus_ring_ops *ring_ops __read_mostly; #elif defined(HAVE_XEN_PLATFORM_COMPAT_H) #include #endif const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [ XenbusStateReconfiguring ] = "Reconfiguring", [ XenbusStateReconfigured ] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, and the given @callback function as the callback. Return 0 on * success, or -errno on error. On success, the given @path will be saved as * @watch->node, and remains the caller's to free. On error, @watch->node will * be NULL, the device will switch to %XenbusStateClosing, and the error will * be saved in the store. */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); #if defined(CONFIG_XEN) || defined(MODULE) int xenbus_watch_path2(struct xenbus_device *dev, const char *path, const char *path2, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2); if (!state) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, state, watch, callback); if (err) kfree(state); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path2); #else /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, and the given @callback function as the callback. * Return 0 on success, or -errno on error. On success, the watched path * (@path/@path2) will be saved as @watch->node, and becomes the caller's to * kfree(). On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. 
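 *
 * Illustrative call (a sketch, not taken from this file): a backend driver
 * watching its own "physical-device" node might use
 *   xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
 *                        "%s/%s", dev->nodename, "physical-device");
 * where be->backend_watch and backend_changed() are that driver's own
 * storage and callback.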
*/ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) { int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); #endif static void xenbus_switch_fatal(struct xenbus_device *, int, int, const char *, ...); static int __xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state, int depth) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not take a caller's Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ struct xenbus_transaction xbt; int current_state; int err, abort; if (state == dev->state) return 0; again: abort = 1; err = xenbus_transaction_start(&xbt); if (err) { xenbus_switch_fatal(dev, depth, err, "starting transaction"); return 0; } err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state); if (err != 1) goto abort; err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); if (err) { xenbus_switch_fatal(dev, depth, err, "writing new state"); goto abort; } abort = 0; abort: err = xenbus_transaction_end(xbt, abort); if (err) { if (err == -EAGAIN && !abort) goto again; xenbus_switch_fatal(dev, depth, err, "ending transaction"); } else dev->state = state; return 0; } /** * xenbus_switch_state * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * Return 0 on success, or -errno on error. On error, the device will switch * to XenbusStateClosing, and the error will be saved in the store. */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { return __xenbus_switch_state(dev, state, 0); } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); /** * Return the path to the error node for the given device, or NULL on failure. * If the value returned is non-NULL, then it is the caller's to kfree. 
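 * The returned path has the form "error/<nodename>"; _dev_error() below then
 * writes the formatted message to the "error" key underneath it (for example
 * error/device/vbd/768/error for a typical block frontend node).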
*/ static char *error_path(struct xenbus_device *dev) { return kasprintf(GFP_KERNEL, "error/%s", dev->nodename); } static void _dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list *ap) { char *printf_buffer, *path_buffer; struct va_format vaf = { .fmt = fmt, .va = ap }; printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf); if (printf_buffer) dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = error_path(dev); if (!printf_buffer || !path_buffer || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer)) dev_err(&dev->dev, "xenbus: failed to write error node for %s (%s)\n", dev->nodename, printf_buffer); kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /** * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps * avoiding recursion within xenbus_switch_state. */ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); _dev_error(dev, err, fmt, &ap); va_end(ap); if (!depth) __xenbus_switch_state(dev, XenbusStateClosing, 1); } /** * xenbus_grant_ring * @dev: xenbus device * @ring_mfn: mfn of ring to grant * * Grant access to the given @ring_mfn to the peer of the given device. Return * 0 on success, or -errno on error. On error, the device will switch to * XenbusStateClosing, and the error will be saved in the store. */ int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) { int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); if (err < 0) xenbus_dev_fatal(dev, err, "granting access to ring page"); return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); int xenbus_multi_grant_ring(struct xenbus_device *dev, unsigned int nr, struct page *pages[], grant_ref_t refs[]) { unsigned int i; int err = -EINVAL; for (i = 0; i < nr; ++i) { unsigned long pfn = page_to_pfn(pages[i]); err = gnttab_grant_foreign_access(dev->otherend_id, pfn_to_mfn(pfn), 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page #%u", i); break; } refs[i] = err; } return err; } EXPORT_SYMBOL_GPL(xenbus_multi_grant_ring); /** * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. 
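 *
 * Illustrative use (a sketch under typical frontend assumptions, not code
 * from this file):
 *   int port;
 *   err = xenbus_alloc_evtchn(dev, &port);
 *   if (!err)
 *           err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", port);
 * with xenbus_free_evtchn(dev, port) undoing the allocation on disconnect.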
*/ int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); /** * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, int port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %d", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); #if !defined(CONFIG_XEN) && !defined(MODULE) /** * xenbus_map_ring_valloc * @dev: xenbus device * @gnt_ref: grant reference * @vaddr: pointer to address to be filled out by mapping * * Based on Rusty Russell's skeleton driver's map_page. * Map a page of memory into this domain from another domain's grant table. * xenbus_map_ring_valloc allocates a page of virtual address space, maps the * page to that address, and sets *vaddr to that address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr) { return ring_ops->map(dev, gnt_ref, vaddr); } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { .flags = GNTMAP_host_map | GNTMAP_contains_pte, .ref = gnt_ref, .dom = dev->otherend_id, }; struct xenbus_map_node *node; struct vm_struct *area; pte_t *pte; *vaddr = NULL; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; area = alloc_vm_area(PAGE_SIZE, &pte); if (!area) { kfree(node); return -ENOMEM; } op.host_addr = arbitrary_virt_to_machine(pte).maddr; gnttab_batch_map(&op, 1); if (op.status != GNTST_okay) { free_vm_area(area); kfree(node); xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); return op.status; } node->handle = op.handle; node->area = area; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); spin_unlock(&xenbus_valloc_lock); *vaddr = area->addr; return 0; } static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr) { struct xenbus_map_node *node; int err; void *addr; *vaddr = NULL; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */); if (err) goto out_err; addr = pfn_to_kaddr(page_to_pfn(node->page)); err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); if (err) goto out_err_free_ballooned_pages; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); spin_unlock(&xenbus_valloc_lock); *vaddr = addr; return 0; out_err_free_ballooned_pages: free_xenballooned_pages(1, &node->page); out_err: kfree(node); return err; } /** * xenbus_map_ring * @dev: xenbus device * @gnt_ref: grant reference * @handle: pointer to grant handle to be filled * @vaddr: address to be mapped to * * Map a page of memory into this domain from another domain's grant table. 
* xenbus_map_ring does not allocate the virtual address space (you must do * this yourself!). It only maps in the page to the specified address. * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) * or -ENOMEM on error. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. */ int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t gnt_ref, grant_handle_t *handle, void *vaddr) { struct gnttab_map_grant_ref op; gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, dev->otherend_id); gnttab_batch_map(&op, 1); if (op.status != GNTST_okay) { xenbus_dev_fatal(dev, op.status, "mapping in shared page %d from domain %d", gnt_ref, dev->otherend_id); } else *handle = op.handle; return op.status; } EXPORT_SYMBOL_GPL(xenbus_map_ring); /** * xenbus_unmap_ring_vfree * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). */ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) { return ring_ops->unmap(dev, vaddr); } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) { struct xenbus_map_node *node; struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; unsigned int level; spin_lock(&xenbus_valloc_lock); list_for_each_entry(node, &xenbus_valloc_pages, next) { if (node->area->addr == vaddr) { list_del(&node->next); goto found; } } node = NULL; found: spin_unlock(&xenbus_valloc_lock); if (!node) { xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } op.handle = node->handle; op.host_addr = arbitrary_virt_to_machine( lookup_address((unsigned long)vaddr, &level)).maddr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status == GNTST_okay) free_vm_area(node->area); else xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", node->handle, op.status); kfree(node); return op.status; } static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) { int rv; struct xenbus_map_node *node; void *addr; spin_lock(&xenbus_valloc_lock); list_for_each_entry(node, &xenbus_valloc_pages, next) { addr = pfn_to_kaddr(page_to_pfn(node->page)); if (addr == vaddr) { list_del(&node->next); goto found; } } node = addr = NULL; found: spin_unlock(&xenbus_valloc_lock); if (!node) { xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } rv = xenbus_unmap_ring(dev, node->handle, addr); if (!rv) free_xenballooned_pages(1, &node->page); else WARN(1, "Leaking %p\n", vaddr); kfree(node); return rv; } /** * xenbus_unmap_ring * @dev: xenbus device * @handle: grant handle * @vaddr: addr to unmap * * Unmap a page of memory in this domain that was imported from another domain. * Returns 0 on success and returns GNTST_* on error * (see xen/include/interface/grant_table.h). 
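 * In the HVM path above, xenbus_unmap_ring_vfree_hvm() pairs this call with
 * free_xenballooned_pages() once the grant handle has been released.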
*/ int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr) { struct gnttab_unmap_grant_ref op; gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); if (op.status != GNTST_okay) xenbus_dev_error(dev, op.status, "unmapping page at handle %d error %d", handle, op.status); return op.status; } EXPORT_SYMBOL_GPL(xenbus_unmap_ring); #endif /** * xenbus_read_driver_state * @path: path for driver * * Return the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { int result; if (xenbus_scanf(XBT_NIL, path, "state", "%d", &result) != 1) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); #if !defined(CONFIG_XEN) && !defined(MODULE) static const struct xenbus_ring_ops ring_ops_pv = { .map = xenbus_map_ring_valloc_pv, .unmap = xenbus_unmap_ring_vfree_pv, }; static const struct xenbus_ring_ops ring_ops_hvm = { .map = xenbus_map_ring_valloc_hvm, .unmap = xenbus_unmap_ring_vfree_hvm, }; void __init xenbus_ring_ops_init(void) { if (xen_pv_domain()) ring_ops = &ring_ops_pv; else ring_ops = &ring_ops_hvm; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_comms.c000066400000000000000000000163621314037446600322440ustar00rootroot00000000000000/****************************************************************************** * xenbus_comms.c * * Low level code to talks to Xen Store: ringbuffer and event channel. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #else #include #include #include #endif #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif static int xenbus_irq; static DECLARE_WAIT_QUEUE_HEAD(xb_waitq); static irqreturn_t wake_waiting(int irq, void *unused) { #ifdef CONFIG_XEN_PRIVILEGED_GUEST static DECLARE_WORK(probe_work, xenbus_probe); int old, new; old = atomic_read(&xenbus_xsd_state); switch (old) { case XENBUS_XSD_UNCOMMITTED: BUG(); return IRQ_HANDLED; case XENBUS_XSD_FOREIGN_INIT: new = XENBUS_XSD_FOREIGN_READY; break; case XENBUS_XSD_LOCAL_INIT: new = XENBUS_XSD_LOCAL_READY; break; case XENBUS_XSD_FOREIGN_READY: case XENBUS_XSD_LOCAL_READY: default: goto wake; } old = atomic_cmpxchg(&xenbus_xsd_state, old, new); if (old != new) schedule_work(&probe_work); wake: #endif wake_up(&xb_waitq); return IRQ_HANDLED; } static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) { return ((prod - cons) <= XENSTORE_RING_SIZE); } static void *get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) *len = XENSTORE_RING_SIZE - (prod - cons); return buf + MASK_XENSTORE_IDX(prod); } static const void *get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod, const char *buf, uint32_t *len) { *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); if ((prod - cons) < *len) *len = prod - cons; return buf + MASK_XENSTORE_IDX(cons); } /** * xb_write - low level write * @data: buffer to send * @len: length of buffer * * Returns 0 on success, error otherwise. */ int xb_write(const void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { void *dst; unsigned int avail; rc = wait_event_interruptible( xb_waitq, (intf->req_prod - intf->req_cons) != XENSTORE_RING_SIZE); if (rc < 0) return rc; /* Read indexes, then verify. */ cons = intf->req_cons; prod = intf->req_prod; if (!check_indexes(cons, prod)) { intf->req_cons = intf->req_prod = 0; return -EIO; } dst = get_output_chunk(cons, prod, intf->req, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must write data /after/ reading the consumer index. */ mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } int xb_data_to_read(void) { struct xenstore_domain_interface *intf = xen_store_interface; return (intf->rsp_cons != intf->rsp_prod); } int xb_wait_for_data_to_read(void) { #ifdef OPENSUSE_1302 return wait_event_interruptible(xb_waitq, xb_data_to_read()); #else return wait_event_interruptible(xb_waitq, (kgr_task_safe(current), xb_data_to_read())); #endif } int xb_read(void *data, unsigned len) { struct xenstore_domain_interface *intf = xen_store_interface; XENSTORE_RING_IDX cons, prod; int rc; while (len != 0) { unsigned int avail; const char *src; rc = xb_wait_for_data_to_read(); if (rc < 0) return rc; /* Read indexes, then verify. 
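   rsp_cons/rsp_prod are snapshotted before use and sanity-checked by
   check_indexes(); if the producer has run more than XENSTORE_RING_SIZE ahead
   of the consumer, the ring is treated as corrupt, both indexes are reset and
   -EIO is returned.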
*/ cons = intf->rsp_cons; prod = intf->rsp_prod; if (!check_indexes(cons, prod)) { intf->rsp_cons = intf->rsp_prod = 0; return -EIO; } src = get_input_chunk(cons, prod, intf->rsp, &avail); if (avail == 0) continue; if (avail > len) avail = len; /* Must read data /after/ reading the producer index. */ rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); /* Implies mb(): other side will see the updated consumer. */ notify_remote_via_evtchn(xen_store_evtchn); } return 0; } /** * xb_init_comms - Set up interrupt handler off store event channel. */ int xb_init_comms(void) { struct xenstore_domain_interface *intf = xen_store_interface; int err; if (intf->req_prod != intf->req_cons) pr_err("request ring is not quiescent (%08x:%08x)!\n", intf->req_cons, intf->req_prod); if (intf->rsp_prod != intf->rsp_cons) { pr_warn("response ring is not quiescent (%08x:%08x): fixing up\n", intf->rsp_cons, intf->rsp_prod); /* breaks kdump */ if (!reset_devices) intf->rsp_cons = intf->rsp_prod; } #if defined(CONFIG_XEN) || defined(MODULE) if (xenbus_irq) unbind_from_irqhandler(xenbus_irq, &xb_waitq); err = bind_caller_port_to_irqhandler( xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err <= 0) { pr_err("request irq failed %i\n", err); return err; } xenbus_irq = err; #else if (xenbus_irq) { /* Already have an irq; assume we're resuming */ rebind_evtchn_irq(xen_store_evtchn, xenbus_irq); } else { err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting, 0, "xenbus", &xb_waitq); if (err < 0) { pr_err("request irq failed %i\n", err); return err; } xenbus_irq = err; } #endif return 0; } #if !defined(CONFIG_XEN) && !defined(MODULE) void xb_deinit_comms(void) { unbind_from_irqhandler(xenbus_irq, &xb_waitq); xenbus_irq = 0; } #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_comms.h000066400000000000000000000051201314037446600322370ustar00rootroot00000000000000/* * Private include for xenbus communications. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_COMMS_H #define _XENBUS_COMMS_H #include int xs_init(void); int xb_init_comms(void); void xb_deinit_comms(void); /* Low level routines. */ int xb_write(const void *data, unsigned len); int xb_read(void *data, unsigned len); int xb_data_to_read(void); int xb_wait_for_data_to_read(void); int xs_input_avail(void); extern struct xenstore_domain_interface *xen_store_interface; extern int xen_store_evtchn; extern enum xenstore_init xen_store_domain_type; extern const struct file_operations xen_xenbus_fops; /* For xenbus internal use. */ enum { XENBUS_XSD_UNCOMMITTED = 0, XENBUS_XSD_FOREIGN_INIT, XENBUS_XSD_FOREIGN_READY, XENBUS_XSD_LOCAL_INIT, XENBUS_XSD_LOCAL_READY, }; extern atomic_t xenbus_xsd_state; static inline int is_xenstored_ready(void) { int s = atomic_read(&xenbus_xsd_state); return s == XENBUS_XSD_FOREIGN_READY || s == XENBUS_XSD_LOCAL_READY; } #if defined(CONFIG_XEN_XENBUS_DEV) && defined(CONFIG_XEN_PRIVILEGED_GUEST) #include #include int xenbus_conn(domid_t, grant_ref_t *, evtchn_port_t *); #endif #endif /* _XENBUS_COMMS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_dev.c000066400000000000000000000274151314037446600317050ustar00rootroot00000000000000/* * xenbus_dev.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #ifdef OPENSUSE_1302 #include #endif #include "xenbus_comms.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #include struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. 
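   Replies from xenstored and watch events are appended here as struct
   read_buffer entries (see queue_reply()/queue_flush()) and drained by
   xenbus_dev_read(); read_waitq is woken whenever new data is queued.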
*/ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; if (!is_xenstored_ready()) return -ENODEV; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static int queue_reply(struct list_head *queue, const void *data, unsigned int len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (!rb) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } static void queue_flush(struct xenbus_dev_data *u, struct list_head *queue, int err) { if (!err) { list_splice_tail(queue, &u->read_buffers); wake_up(&u->read_waitq); } else while (!list_empty(queue)) { struct read_buffer *rb = list_entry(queue->next, struct read_buffer, list); list_del(queue->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int err, path_len, tok_len, body_len, data_len = 0; LIST_HEAD(queue); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)); if (!err) err = queue_reply(&queue, path, path_len); if (!err) err = queue_reply(&queue, token, tok_len); if (!err && len > 2) err = queue_reply(&queue, vec[2], data_len); queue_flush(adap->dev_data, &queue, err); mutex_unlock(&adap->dev_data->reply_mutex); } static LIST_HEAD(watch_list); static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply = NULL; LIST_HEAD(queue); char *path, *token; struct watch_adapter *watch; int err, rc = len; if (!is_xenstored_ready()) return -ENODEV; if (len > sizeof(u->u.buffer) - u->len) { rc = -EINVAL; goto out; } if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: { static const char XS_RESP[] = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = 
memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.node = kstrdup(path, GFP_KERNEL); watch->watch.callback = watch_fired; watch->token = kstrdup(token, GFP_KERNEL); watch->dev_data = u; err = watch->watch.node && watch->token ? register_xenbus_watch(&watch->watch) : -ENOMEM; if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry(watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = sizeof(XS_RESP); mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &hdr, sizeof(hdr)) ?: queue_reply(&queue, XS_RESP, hdr.len); break; } case XS_TRANSACTION_START: trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } goto common; case XS_TRANSACTION_END: list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; if (&trans->list == &u->transactions) { rc = -ESRCH; goto out; } /* fall through */ common: default: reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { if (msg_type == XS_TRANSACTION_START) kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); err = queue_reply(&queue, &u->u.msg, sizeof(u->u.msg)) ?: queue_reply(&queue, reply, u->u.msg.len); break; } queue_flush(u, &queue, err); mutex_unlock(&u->reply_mutex); kfree(reply); if (err) rc = err; out: u->len = 0; return rc; } static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; if (!is_xenstored_ready()) return -ENODEV; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } #ifdef CONFIG_XEN_PRIVILEGED_GUEST static long xenbus_dev_ioctl(struct file *file, unsigned int cmd, unsigned long data) { void __user *udata = (void __user 
*) data; int ret = -ENOTTY; if (!is_initial_xendomain()) return -ENODEV; switch (cmd) { case IOCTL_XENBUS_ALLOC: { xenbus_alloc_t xa; int old; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_FOREIGN_INIT); if (old != XENBUS_XSD_UNCOMMITTED) return -EBUSY; if (copy_from_user(&xa, udata, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } ret = xenbus_conn(xa.dom, &xa.grant_ref, &xa.port); if (ret != 0) { atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } if (copy_to_user(udata, &xa, sizeof(xa))) { ret = -EFAULT; atomic_set(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED); break; } } break; default: break; } return ret; } #endif static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .llseek = no_llseek, .poll = xenbus_dev_poll, #ifdef CONFIG_XEN_PRIVILEGED_GUEST .unlocked_ioctl = xenbus_dev_ioctl #endif }; #ifndef OPENSUSE_1302 int #ifndef MODULE __init #endif xenbus_dev_init(void) { if (!create_xen_proc_entry("xenbus", S_IFREG|S_IRUSR, &xenbus_dev_file_ops, NULL)) return -ENOMEM; return 0; } #else static struct miscdevice xenbus_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/xenbus", .fops = &xenbus_dev_file_ops, }; int #ifndef MODULE __init #endif xenbus_dev_init(void) { int err; err = misc_register(&xenbus_dev); if (err) pr_err("Could not register xenbus frontend device\n"); return err; } #endifxenbus_dev_backend.c000066400000000000000000000063401314037446600332670ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #ifdef CONFIG_PARAVIRT_XEN #include #include #include #include #endif #include #include #include "xenbus_comms.h" MODULE_LICENSE("GPL"); static int xenbus_backend_open(struct inode *inode, struct file *filp) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return nonseekable_open(inode, filp); } static long xenbus_alloc(domid_t domid) { #ifdef CONFIG_PARAVIRT_XEN struct evtchn_alloc_unbound arg; int err = -EEXIST; xs_suspend(); /* If xenstored_ready is nonzero, that means we have already talked to * xenstore and set up watches. These watches will be restored by * xs_resume, but that requires communication over the port established * below that is not visible to anyone until the ioctl returns. * * This can be resolved by splitting the ioctl into two parts * (postponing the resume until xenstored is active) but this is * unnecessarily complex for the intended use where xenstored is only * started once - so return -EEXIST if it's already running. 
*/ if (xenstored_ready) goto out_err; gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, virt_to_mfn(xen_store_interface), 0 /* writable */); arg.dom = DOMID_SELF; arg.remote_dom = domid; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg); if (err) goto out_err; if (xen_store_evtchn > 0) xb_deinit_comms(); xen_store_evtchn = arg.port; xs_resume(); return arg.port; out_err: xs_suspend_cancel(); return err; #else return -EOPNOTSUPP; #endif } static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case IOCTL_XENBUS_BACKEND_EVTCHN: if (xen_store_evtchn > 0) return xen_store_evtchn; return -ENODEV; case IOCTL_XENBUS_BACKEND_SETUP: return xenbus_alloc(data); default: return -ENOTTY; } } static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, PFN_DOWN(__pa(xen_store_interface)), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static const struct file_operations xenbus_backend_fops = { .open = xenbus_backend_open, .mmap = xenbus_backend_mmap, .unlocked_ioctl = xenbus_backend_ioctl, }; static struct miscdevice xenbus_backend_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/xenbus_backend", .fops = &xenbus_backend_fops, }; static int __init xenbus_backend_init(void) { int err; if (!xen_initial_domain()) return -ENODEV; err = misc_register(&xenbus_backend_dev); if (err) pr_err("Could not register xenbus backend device\n"); return err; } static void __exit xenbus_backend_exit(void) { misc_deregister(&xenbus_backend_dev); } module_init(xenbus_backend_init); module_exit(xenbus_backend_exit); xenbus_dev_frontend.c000066400000000000000000000346011314037446600335200ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/* * Driver giving user-space access to the kernel's xenbus connection * to xenstore. * * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Changes: * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem * and /proc/xen compatibility mount point. * Turned xenfs into a loadable module. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include #include #include MODULE_LICENSE("GPL"); /* * An element of a list of outstanding transactions, for which we're * still waiting a reply. */ struct xenbus_transaction_holder { struct list_head list; struct xenbus_transaction handle; }; /* * A buffer of data on the queue. */ struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_file_priv { /* * msgbuffer_mutex is held while partial requests are built up * and complete requests are acted on. It therefore protects * the "transactions" and "watches" lists, and the partial * request length and buffer. * * reply_mutex protects the reply being built up to return to * usermode. It nests inside msgbuffer_mutex but may be held * alone during a watch callback. */ struct mutex msgbuffer_mutex; /* In-progress transactions */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[XENSTORE_PAYLOAD_MAX]; } u; /* Response queue. */ struct mutex reply_mutex; struct list_head read_buffers; wait_queue_head_t read_waitq; }; /* Read out any raw xenbus messages queued up. */ static ssize_t xenbus_file_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_file_priv *u = filp->private_data; struct read_buffer *rb; unsigned i; int ret; mutex_lock(&u->reply_mutex); again: while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); i = 0; while (i < len) { unsigned sz = min((unsigned)len - i, rb->len - rb->cons); ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); i += sz - ret; rb->cons += sz - ret; if (ret != 0) { if (i == 0) i = -EFAULT; goto out; } /* Clear out buffer if it has been consumed */ if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } if (i == 0) goto again; out: mutex_unlock(&u->reply_mutex); return i; } /* * Add a buffer to the queue. Caller must hold the appropriate lock * if the queue is not local. (Commonly the caller will build up * multiple queued buffers on a temporary local list, and then add it * to the appropriate list under lock once all the buffers have een * successfully allocated.) 
*/ static int queue_reply(struct list_head *queue, const void *data, size_t len) { struct read_buffer *rb; if (len == 0) return 0; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if (rb == NULL) return -ENOMEM; rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, queue); return 0; } /* * Free all the read_buffer s on a list. * Caller must have sole reference to list. */ static void queue_cleanup(struct list_head *list) { struct read_buffer *rb; while (!list_empty(list)) { rb = list_entry(list->next, struct read_buffer, list); list_del(list->next); kfree(rb); } } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_file_priv *dev_data; char *token; }; static void free_watch_adapter(struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static struct watch_adapter *alloc_watch_adapter(const char *path, const char *token) { struct watch_adapter *watch; watch = kzalloc(sizeof(*watch), GFP_KERNEL); if (watch == NULL) goto out_fail; watch->watch.node = kstrdup(path, GFP_KERNEL); if (watch->watch.node == NULL) goto out_free; watch->token = kstrdup(token, GFP_KERNEL); if (watch->token == NULL) goto out_free; return watch; out_free: free_watch_adapter(watch); out_fail: return NULL; } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap; struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; int ret; LIST_HEAD(staging_q); adap = container_of(watch, struct watch_adapter, watch); path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); ret = queue_reply(&staging_q, &hdr, sizeof(hdr)); if (!ret) ret = queue_reply(&staging_q, path, path_len); if (!ret) ret = queue_reply(&staging_q, token, tok_len); if (!ret && len > 2) ret = queue_reply(&staging_q, vec[2], data_len); if (!ret) { /* success: pass reply list onto watcher */ list_splice_tail(&staging_q, &adap->dev_data->read_buffers); wake_up(&adap->dev_data->read_waitq); } else queue_cleanup(&staging_q); mutex_unlock(&adap->dev_data->reply_mutex); } static int xenbus_write_transaction(unsigned msg_type, struct xenbus_file_priv *u) { int rc; void *reply; struct xenbus_transaction_holder *trans = NULL; LIST_HEAD(staging_q); if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); if (!rc) rc = queue_reply(&staging_q, reply, u->u.msg.len); if (!rc) { list_splice_tail(&staging_q, &u->read_buffers); wake_up(&u->read_waitq); } else { queue_cleanup(&staging_q); } mutex_unlock(&u->reply_mutex); kfree(reply); out: return rc; } static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) { struct 
watch_adapter *watch, *tmp_watch; char *path, *token; int err, rc; LIST_HEAD(staging_q); path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) { rc = -EILSEQ; goto out; } if (msg_type == XS_WATCH) { watch = alloc_watch_adapter(path, token); if (watch == NULL) { rc = -ENOMEM; goto out; } watch->watch.callback = watch_fired; watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } /* Success. Synthesize a reply to say all is OK. */ { struct { struct xsd_sockmsg hdr; char body[3]; } __packed reply = { { .type = msg_type, .len = sizeof(reply.body) }, "OK" }; mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); wake_up(&u->read_waitq); mutex_unlock(&u->reply_mutex); } out: return rc; } static ssize_t xenbus_file_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_file_priv *u = filp->private_data; uint32_t msg_type; int rc = len; int ret; LIST_HEAD(staging_q); /* * We're expecting usermode to be writing properly formed * xenbus messages. If they write an incomplete message we * buffer it up. Once it is complete, we act on it. */ /* * Make sure concurrent writers can't stomp all over each * other's messages and make a mess of our partial message * buffer. We don't make any attemppt to stop multiple * writers from making a mess of each other's incomplete * messages; we're just trying to guarantee our own internal * consistency and make sure that single writes are handled * atomically. */ mutex_lock(&u->msgbuffer_mutex); /* Get this out of the way early to avoid confusion */ if (len == 0) goto out; /* Can't write a xenbus message larger we can buffer */ if (len > sizeof(u->u.buffer) - u->len) { /* On error, dump existing buffer */ u->len = 0; rc = -EINVAL; goto out; } ret = copy_from_user(u->u.buffer + u->len, ubuf, len); if (ret != 0) { rc = -EFAULT; goto out; } /* Deal with a partial copy. */ len -= ret; rc = len; u->len += len; /* Return if we haven't got a full message yet */ if (u->len < sizeof(u->u.msg)) goto out; /* not even the header yet */ /* If we're expecting a message that's larger than we can possibly send, dump what we have and return an error. */ if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) { rc = -E2BIG; u->len = 0; goto out; } if (u->len < (sizeof(u->u.msg) + u->u.msg.len)) goto out; /* incomplete data portion */ /* * OK, now we have a complete message. Do something with it. 
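 * XS_WATCH and XS_UNWATCH are handled locally by xenbus_write_watch();
 * every other message type is forwarded to xenstored via
 * xenbus_write_transaction().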
*/ msg_type = u->u.msg.type; switch (msg_type) { case XS_WATCH: case XS_UNWATCH: /* (Un)Ask for some path to be watched for changes */ ret = xenbus_write_watch(msg_type, u); break; default: /* Send out a transaction */ ret = xenbus_write_transaction(msg_type, u); break; } if (ret != 0) rc = ret; /* Buffered message consumed */ u->len = 0; out: mutex_unlock(&u->msgbuffer_mutex); return rc; } static int xenbus_file_open(struct inode *inode, struct file *filp) { struct xenbus_file_priv *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); mutex_init(&u->msgbuffer_mutex); filp->private_data = u; return 0; } static int xenbus_file_release(struct inode *inode, struct file *filp) { struct xenbus_file_priv *u = filp->private_data; struct xenbus_transaction_holder *trans, *tmp; struct watch_adapter *watch, *tmp_watch; struct read_buffer *rb, *tmp_rb; /* * No need for locking here because there are no other users, * by definition. */ list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) { list_del(&rb->list); kfree(rb); } kfree(u); return 0; } static unsigned int xenbus_file_poll(struct file *file, poll_table *wait) { struct xenbus_file_priv *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } const struct file_operations xen_xenbus_fops = { .read = xenbus_file_read, .write = xenbus_file_write, .open = xenbus_file_open, .release = xenbus_file_release, .poll = xenbus_file_poll, .llseek = no_llseek, }; EXPORT_SYMBOL_GPL(xen_xenbus_fops); static struct miscdevice xenbus_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "xen/xenbus", .fops = &xen_xenbus_fops, }; static int __init xenbus_init(void) { int err; if (!xen_domain()) return -ENODEV; err = misc_register(&xenbus_dev); if (err) pr_err("Could not register xenbus frontend device\n"); return err; } static void __exit xenbus_exit(void) { misc_deregister(&xenbus_dev); } module_init(xenbus_init); module_exit(xenbus_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_probe.c000066400000000000000000001142711314037446600322330ustar00rootroot00000000000000/****************************************************************************** * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __FUNCTION__, __LINE__, ##args) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_XEN) || defined(MODULE) #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL(sym) __typeof__(sym) sym #else #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) #include #endif #include #include #include #include #include #define PARAVIRT_EXPORT_SYMBOL EXPORT_SYMBOL_GPL #endif #ifndef CONFIG_XEN #include #endif #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif int xen_store_evtchn; EXPORT_SYMBOL_GPL(xen_store_evtchn); struct xenstore_domain_interface *xen_store_interface; EXPORT_SYMBOL_GPL(xen_store_interface); #if !defined(CONFIG_XEN) && !defined(MODULE) enum xenstore_init xen_store_domain_type; EXPORT_SYMBOL_GPL(xen_store_domain_type); #endif static unsigned long xen_store_mfn; extern struct mutex xenwatch_mutex; static #ifdef CONFIG_XEN_UNPRIVILEGED_GUEST __initdata #endif BLOCKING_NOTIFIER_HEAD(xenstore_chain); #if defined(CONFIG_XEN) || defined(MODULE) static void wait_for_devices(struct xenbus_driver *xendrv); #endif /* If something in array of ids matches this device, return it. 
*/ static const struct xenbus_device_id * match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) { for (; *arr->devicetype != '\0'; arr++) { if (!strcmp(arr->devicetype, dev->devicetype)) return arr; } return NULL; } int xenbus_match(struct device *_dev, struct device_driver *_drv) { struct xenbus_driver *drv = to_xenbus_driver(_drv); if (!drv->ids) return 0; return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; } PARAVIRT_EXPORT_SYMBOL(xenbus_match); static void free_otherend_details(struct xenbus_device *dev) { kfree(dev->otherend); dev->otherend = NULL; } static void free_otherend_watch(struct xenbus_device *dev) { if (dev->otherend_watch.node) { unregister_xenbus_watch(&dev->otherend_watch); kfree(dev->otherend_watch.node); dev->otherend_watch.node = NULL; } } int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node) { int err = xenbus_gather(XBT_NIL, xendev->nodename, id_node, "%i", &xendev->otherend_id, path_node, NULL, &xendev->otherend, NULL); if (err) { xenbus_dev_fatal(xendev, err, "reading other end details from %s", xendev->nodename); return err; } if (strlen(xendev->otherend) == 0 || !xenbus_exists(XBT_NIL, xendev->otherend, "")) { xenbus_dev_fatal(xendev, -ENOENT, "unable to read other end from %s. " "missing or inaccessible.", xendev->nodename); free_otherend_details(xendev); return -ENOENT; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_read_otherend_details); #if defined(CONFIG_XEN) || defined(MODULE) static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static void otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) #else /* !CONFIG_XEN && !MODULE */ void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown) #endif /* CONFIG_XEN || MODULE */ { struct xenbus_device *dev = container_of(watch, struct xenbus_device, otherend_watch); struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); enum xenbus_state state; /* Protect us against watches firing on old details when the otherend details change, say immediately after a resume. */ if (!dev->otherend || strncmp(dev->otherend, vec[XS_WATCH_PATH], strlen(dev->otherend))) { dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); return; } state = xenbus_read_driver_state(dev->otherend); dev_dbg(&dev->dev, "state is %d (%s), %s, %s", state, xenbus_strstate(state), dev->otherend_watch.node, vec[XS_WATCH_PATH]); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* * Ignore xenbus transitions during shutdown. This prevents us doing * work that can fail e.g., when the rootfs is gone. */ if (system_state > SYSTEM_RUNNING) { /* If we're frontend, drive the state machine to Closed. */ /* This should cause the backend to release our resources. 
*/ # if defined(CONFIG_XEN) || defined(MODULE) const struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); int ignore_on_shutdown = (bus->levels == 2); # endif if (ignore_on_shutdown && (state == XenbusStateClosing)) xenbus_frontend_closed(dev); return; } #endif if (drv->otherend_changed) drv->otherend_changed(dev, state); } PARAVIRT_EXPORT_SYMBOL(xenbus_otherend_changed); static int talk_to_otherend(struct xenbus_device *dev) { struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); free_otherend_watch(dev); free_otherend_details(dev); return drv->read_otherend_details(dev); } static int watch_otherend(struct xenbus_device *dev) { #if defined(CONFIG_XEN) || defined(MODULE) return xenbus_watch_path2(dev, dev->otherend, "state", &dev->otherend_watch, otherend_changed); #else struct xen_bus_type *bus = container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, bus->otherend_changed, "%s/%s", dev->otherend, "state"); #endif } int xenbus_dev_probe(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); const struct xenbus_device_id *id; int err; DPRINTK("%s", dev->nodename); if (!drv->probe) { err = -ENODEV; goto fail; } id = match_device(drv->ids, dev); if (!id) { err = -ENODEV; goto fail; } err = talk_to_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: talk_to_otherend on %s failed.\n", dev->nodename); return err; } err = drv->probe(dev, id); if (err) goto fail; err = watch_otherend(dev); if (err) { dev_warn(&dev->dev, "xenbus_probe: watch_otherend on %s failed.\n", dev->nodename); return err; } return 0; fail: xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); xenbus_switch_state(dev, XenbusStateClosed); #if defined(CONFIG_XEN) || defined(MODULE) return -ENODEV; #else return err; #endif } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_probe); int xenbus_dev_remove(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); struct xenbus_driver *drv = to_xenbus_driver(_dev->driver); DPRINTK("%s", dev->nodename); free_otherend_watch(dev); if (drv->remove) drv->remove(dev); free_otherend_details(dev); xenbus_switch_state(dev, XenbusStateClosed); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_remove); void xenbus_dev_shutdown(struct device *_dev) { struct xenbus_device *dev = to_xenbus_device(_dev); unsigned long timeout = 5*HZ; DPRINTK("%s", dev->nodename); /* Commented out since xenstored stubdom is now minios based not linux based #define XENSTORE_DOMAIN_SHARES_THIS_KERNEL */ #ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL if (is_initial_xendomain()) #endif return; get_device(&dev->dev); if (dev->state != XenbusStateConnected) { dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__, dev->nodename, xenbus_strstate(dev->state)); goto out; } xenbus_switch_state(dev, XenbusStateClosing); if (!strcmp(dev->devicetype, "vfb")) goto out; timeout = wait_for_completion_timeout(&dev->down, timeout); if (!timeout) dev_info(&dev->dev, "%s: %s timeout closing device\n", __FUNCTION__, dev->nodename); out: put_device(&dev->dev); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_shutdown); int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus) { int ret; if (bus->error) return bus->error; drv->driver.bus = &bus->bus; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) drv->driver.probe = xenbus_dev_probe; drv->driver.remove = xenbus_dev_remove; drv->driver.shutdown = xenbus_dev_shutdown; 
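	/* Pre-2.6.16 kernels take these callbacks on the driver itself; on
	 * newer kernels the same callbacks are supplied through the bus_type
	 * definition (see the xenbus_frontend bus later in this file). */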
#endif mutex_lock(&xenwatch_mutex); ret = driver_register(&drv->driver); mutex_unlock(&xenwatch_mutex); return ret; } PARAVIRT_EXPORT_SYMBOL(xenbus_register_driver_common); void xenbus_unregister_driver(struct xenbus_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(xenbus_unregister_driver); struct xb_find_info { struct xenbus_device *dev; const char *nodename; }; static int cmp_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; if (!strcmp(xendev->nodename, info->nodename)) { info->dev = xendev; get_device(dev); return 1; } return 0; } static struct xenbus_device *xenbus_device_find(const char *nodename, struct bus_type *bus) { struct xb_find_info info = { .dev = NULL, .nodename = nodename }; bus_for_each_dev(bus, NULL, &info, cmp_dev); return info.dev; } static int cleanup_dev(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct xb_find_info *info = data; int len = strlen(info->nodename); DPRINTK("%s", info->nodename); /* Match the info->nodename path, or any subdirectory of that path. */ if (strncmp(xendev->nodename, info->nodename, len)) return 0; /* If the node name is longer, ensure it really is a subdirectory. */ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) return 0; info->dev = xendev; get_device(dev); return 1; } static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) { struct xb_find_info info = { .nodename = path }; do { info.dev = NULL; bus_for_each_dev(bus, NULL, &info, cleanup_dev); if (info.dev) { device_unregister(&info.dev->dev); put_device(&info.dev->dev); } } while (info.dev); } static void xenbus_dev_release(struct device *dev) { if (dev) kfree(to_xenbus_device(dev)); } static ssize_t nodename_show(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); } static ssize_t devtype_show(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); } static ssize_t modalias_show(struct device *dev, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13) struct device_attribute *attr, #endif char *buf) { return sprintf(buf, "%s:%s\n", dev->bus->name, to_xenbus_device(dev)->devicetype); } struct device_attribute xenbus_dev_attrs[] = { __ATTR_RO(nodename), __ATTR_RO(devtype), __ATTR_RO(modalias), __ATTR_NULL }; PARAVIRT_EXPORT_SYMBOL(xenbus_dev_attrs); int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename) { int err; struct xenbus_device *xendev; size_t stringlen; char *tmpstring; enum xenbus_state state = xenbus_read_driver_state(nodename); if (bus->error) return bus->error; if (state != XenbusStateInitialising) { /* Device is not new, so ignore it. This can happen if a device is going away after switching to Closed. */ return 0; } stringlen = strlen(nodename) + 1 + strlen(type) + 1; xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL); if (!xendev) return -ENOMEM; xendev->state = XenbusStateInitialising; /* Copy the strings into the extra space. 
*/ tmpstring = (char *)(xendev + 1); strcpy(tmpstring, nodename); xendev->nodename = tmpstring; tmpstring += strlen(tmpstring) + 1; strcpy(tmpstring, type); xendev->devicetype = tmpstring; init_completion(&xendev->down); #if defined(CONFIG_XEN) || defined(MODULE) xendev->dev.parent = &bus->dev; #endif xendev->dev.bus = &bus->bus; xendev->dev.release = xenbus_dev_release; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) { char devname[XEN_BUS_ID_SIZE]; err = bus->get_bus_id(devname, xendev->nodename); if (!err) dev_set_name(&xendev->dev, "%s", devname); } #else err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename); #endif if (err) goto fail; /* Register with generic device framework. */ err = device_register(&xendev->dev); if (err) goto fail; return 0; fail: kfree(xendev); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_node); #if defined(CONFIG_XEN) || defined(MODULE) /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { pr_warn("bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { pr_warn("bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; if (!strcmp(type, "console")) return 0; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); if (xdev == NULL) return -ENODEV; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype) || add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename) || add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype)) return -ENOMEM; return 0; } #endif /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .error = -ENODEV, .bus = { .name = "xen", .match = xenbus_match, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .uevent = xenbus_uevent_frontend, #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) .dev_attrs = xenbus_dev_attrs, #endif }, .dev = { .init_name = "xen", }, }; int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. 
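 * wait_for_devices() (defined later in this file) polls for up to five
 * minutes so that devices needed for boot, e.g. the root filesystem,
 * have a chance to connect before the caller's init returns.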
*/ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); #endif static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) { int err = 0; char **dir; unsigned int dir_n = 0; int i; dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = bus->probe(bus, type, dir[i]); if (err) break; } kfree(dir); return err; } int xenbus_probe_devices(struct xen_bus_type *bus) { int err = 0; char **dir; unsigned int i, dir_n; if (bus->error) return bus->error; dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); if (IS_ERR(dir)) return PTR_ERR(dir); for (i = 0; i < dir_n; i++) { err = xenbus_probe_device_type(bus, dir[i]); if (err) break; } kfree(dir); return err; } PARAVIRT_EXPORT_SYMBOL(xenbus_probe_devices); static unsigned int char_count(const char *str, char c) { unsigned int i, ret = 0; for (i = 0; str[i]; i++) if (str[i] == c) ret++; return ret; } static int strsep_len(const char *str, char c, unsigned int len) { unsigned int i; for (i = 0; str[i]; i++) if (str[i] == c) { if (len == 0) return i; len--; } return (len == 0) ? i : -ERANGE; } void xenbus_dev_changed(const char *node, struct xen_bus_type *bus) { int exists, rootlen; struct xenbus_device *dev; char type[XEN_BUS_ID_SIZE]; const char *p, *root; if (bus->error || char_count(node, '/') < 2) return; exists = xenbus_exists(XBT_NIL, node, ""); if (!exists) { xenbus_cleanup_devices(node, &bus->bus); return; } /* backend//... or device//... */ p = strchr(node, '/') + 1; snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p); type[XEN_BUS_ID_SIZE-1] = '\0'; rootlen = strsep_len(node, '/', bus->levels); if (rootlen < 0) return; root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node); if (!root) return; dev = xenbus_device_find(root, &bus->bus); if (!dev) xenbus_probe_node(bus, type, root); else put_device(&dev->dev); kfree(root); } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_changed); #if defined(CONFIG_XEN) || defined(MODULE) static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. 
*/ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int __maybe_unused suspend_dev(struct device *dev, void *data) #else int xenbus_dev_suspend(struct device *dev) #endif { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); if (drv->suspend) err = drv->suspend(xdev); if (err) pr_warn("suspend %s failed: %i\n", dev_name(dev), err); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_suspend); #if defined(CONFIG_XEN) || defined(MODULE) static int __maybe_unused suspend_cancel_dev(struct device *dev, void *data) { int err = 0; struct xenbus_driver *drv; struct xenbus_device *xdev; DPRINTK(""); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); xdev = container_of(dev, struct xenbus_device, dev); if (drv->suspend_cancel) err = drv->suspend_cancel(xdev); if (err) pr_warn("suspend_cancel %s failed: %i\n", dev_name(dev), err); return 0; } static int __maybe_unused resume_dev(struct device *dev, void *data) #else int xenbus_dev_resume(struct device *dev) #endif { int err; struct xenbus_driver *drv; struct xenbus_device *xdev = container_of(dev, struct xenbus_device, dev); DPRINTK("%s", xdev->nodename); if (dev->driver == NULL) return 0; drv = to_xenbus_driver(dev->driver); err = talk_to_otherend(xdev); if (err) { pr_warn("resume (talk_to_otherend) %s failed: %i\n", dev_name(dev), err); return err; } xdev->state = XenbusStateInitialising; if (drv->resume) { err = drv->resume(xdev); if (err) { pr_warn("resume %s failed: %i\n", dev_name(dev), err); return err; } } err = watch_otherend(xdev); if (err) { pr_warn("resume (watch_otherend) %s failed: %d.\n", dev_name(dev), err); return err; } return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_resume); #if !defined(CONFIG_XEN) && !defined(MODULE) int xenbus_dev_cancel(struct device *dev) { /* Do nothing */ DPRINTK("cancel"); return 0; } PARAVIRT_EXPORT_SYMBOL(xenbus_dev_cancel); #elif defined(CONFIG_PM_SLEEP) || defined(MODULE) void xenbus_suspend(void) { DPRINTK(""); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev); xenbus_backend_suspend(suspend_dev); xs_suspend(); } void xen_unplug_emulated_devices(void); void xenbus_resume(void) { xb_init_comms(); xs_resume(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev); xenbus_backend_resume(resume_dev); xen_unplug_emulated_devices(); } void xenbus_suspend_cancel(void) { xs_suspend_cancel(); if (!xenbus_frontend.error) bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev); xenbus_backend_resume(suspend_cancel_dev); } #endif /* CONFIG_PM_SLEEP || MODULE */ /* A flag to determine if xenstored is 'ready' (i.e. 
has started) */ atomic_t xenbus_xsd_state = ATOMIC_INIT(XENBUS_XSD_UNCOMMITTED); int #ifdef CONFIG_XEN __init #endif register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; if (is_xenstored_ready()) ret = nb->notifier_call(nb, 0, NULL); else blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } #ifndef CONFIG_XEN EXPORT_SYMBOL_GPL(register_xenstore_notifier); void unregister_xenstore_notifier(struct notifier_block *nb) { blocking_notifier_chain_unregister(&xenstore_chain, nb); } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); #endif #ifndef CONFIG_XEN static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state) != 1) backend_state = XenbusStateUnknown; printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) pr_info("backend %s timed out.\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. */ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_DEBUG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; pr_info("triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); pr_info("reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_DEBUG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n); if 
(IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } #endif void #if defined(CONFIG_XEN_UNPRIVILEGED_GUEST) __init #endif xenbus_probe(struct work_struct *unused) { BUG_ON(!is_xenstored_ready()); #ifndef CONFIG_XEN /* reset devices in Connected or Closed state */ xenbus_reset_state(); #endif #if defined(CONFIG_XEN) || defined(MODULE) /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); xenbus_backend_probe_and_watch(); #endif /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } PARAVIRT_EXPORT_SYMBOL(xenbus_probe); #if !defined(CONFIG_XEN) && !defined(MODULE) static int __init xenbus_probe_initcall(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain() || xen_hvm_domain()) return 0; xenbus_probe(NULL); return 0; } device_initcall(xenbus_probe_initcall); #endif #ifdef CONFIG_XEN_PRIVILEGED_GUEST #ifdef CONFIG_PROC_FS #include #include static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; int old; int rc; old = atomic_cmpxchg(&xenbus_xsd_state, XENBUS_XSD_UNCOMMITTED, XENBUS_XSD_LOCAL_INIT); switch (old) { case XENBUS_XSD_UNCOMMITTED: rc = xb_init_comms(); if (rc != 0) return rc; break; case XENBUS_XSD_FOREIGN_INIT: case XENBUS_XSD_FOREIGN_READY: return -EBUSY; case XENBUS_XSD_LOCAL_INIT: case XENBUS_XSD_LOCAL_READY: default: break; } if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) return -EINVAL; if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn), size, vma->vm_page_prot)) return -EAGAIN; return 0; } static int xsd_kva_show(struct seq_file *m, void *v) { return seq_printf(m, "0x%p", xen_store_interface); } static int xsd_kva_open(struct inode *inode, struct file *file) { return single_open(file, xsd_kva_show, PDE_DATA(inode)); } static const struct file_operations xsd_kva_fops = { .open = xsd_kva_open, .llseek = seq_lseek, .read = seq_read, .mmap = xsd_kva_mmap, .release = single_release }; static int xsd_port_show(struct seq_file *m, void *v) { return seq_printf(m, "%d", xen_store_evtchn); } static int xsd_port_open(struct inode *inode, struct file *file) { return single_open(file, xsd_port_show, PDE_DATA(inode)); } static const struct file_operations xsd_port_fops = { .open = xsd_port_open, .llseek = seq_lseek, .read = seq_read, .release = single_release }; #endif #ifdef CONFIG_XEN_XENBUS_DEV int xenbus_conn(domid_t remote_dom, grant_ref_t *grant_ref, evtchn_port_t *local_port) { struct evtchn_alloc_unbound alloc_unbound; int rc, rc2; BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT); BUG_ON(!is_initial_xendomain()); remove_xen_proc_entry("xsd_kva"); remove_xen_proc_entry("xsd_port"); rc = close_evtchn(xen_store_evtchn); if (rc != 0) goto fail0; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = remote_dom; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) goto fail0; *local_port = xen_store_evtchn = alloc_unbound.port; /* keep the old page (xen_store_mfn, xen_store_interface) */ rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn, GTF_permit_access); if (rc < 0) goto fail1; *grant_ref = rc; rc = xb_init_comms(); if (rc != 0) goto fail1; return 0; fail1: rc2 = close_evtchn(xen_store_evtchn); if (rc2 != 0) pr_warn("Error freeing xenstore event channel: %d\n", rc2); fail0: xen_store_evtchn = -1; return rc; } #endif #endif /* 
CONFIG_XEN_PRIVILEGED_GUEST */ /* Set up event channel for xenstored which is run as a local process * (this is normally used only in dom0) */ static int __init xenstored_local_init(void) { int err = 0; unsigned long page = 0; struct evtchn_alloc_unbound alloc_unbound; /* Allocate Xenstore page */ page = get_zeroed_page(GFP_KERNEL); if (!page) goto out_err; xen_store_mfn = xen_start_info->store_mfn = pfn_to_mfn(virt_to_phys((void *)page) >> PAGE_SHIFT); /* Next allocate a local port which xenstored can bind to */ alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = DOMID_SELF; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err == -ENOSYS) goto out_err; BUG_ON(err); xen_store_evtchn = xen_start_info->store_evtchn = alloc_unbound.port; return 0; out_err: if (page != 0) free_page(page); return err; } #ifndef MODULE static int __init #else int #endif xenbus_init(void) { int err = 0; #if !defined(CONFIG_XEN) && !defined(MODULE) uint64_t v = 0; xen_store_domain_type = XS_UNKNOWN; #endif DPRINTK(""); if (!is_running_on_xen()) return -ENODEV; #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel bus subsystem */ xenbus_frontend.error = bus_register(&xenbus_frontend.bus); if (xenbus_frontend.error) pr_warn("Error registering frontend bus: %i\n", xenbus_frontend.error); xenbus_backend_bus_register(); /* * Domain0 doesn't have a store_evtchn or store_mfn yet. */ if (is_initial_xendomain()) { err = xenstored_local_init(); if (err) goto out_error; #if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST) /* And finally publish the above info in /proc/xen */ create_xen_proc_entry("xsd_kva", S_IFREG|S_IRUSR|S_IWUSR, &xsd_kva_fops, NULL); create_xen_proc_entry("xsd_port", S_IFREG|S_IRUSR, &xsd_port_fops, NULL); #endif xen_store_interface = mfn_to_virt(xen_store_mfn); } else { #ifndef CONFIG_XEN uint64_t v = 0; err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto out_error; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; xen_store_mfn = (unsigned long)v; xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); #endif #ifndef MODULE xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); #endif atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); if (err) goto out_error; } xenbus_dev_init(); #else /* !defined(CONFIG_XEN) && !defined(MODULE) */ xenbus_ring_ops_init(); if (xen_pv_domain()) xen_store_domain_type = XS_PV; if (xen_hvm_domain()) xen_store_domain_type = XS_HVM; if (xen_hvm_domain() && xen_initial_domain()) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && !xen_start_info->store_evtchn) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY); switch (xen_store_domain_type) { case XS_LOCAL: err = xenstored_local_init(); if (err) goto out_error; xen_store_interface = mfn_to_virt(xen_store_mfn); break; case XS_PV: xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); break; case XS_HVM: err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); if (err) goto out_error; xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; xen_store_mfn = (unsigned long)v; 
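		/* HVM guests obtain the xenstore event channel and frame number
		 * from HVM params; the shared page is mapped with xen_remap()
		 * rather than mfn_to_virt() as in the XS_PV/XS_LOCAL cases. */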
xen_store_interface = xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); break; default: pr_warn("Xenstore state unknown\n"); break; } #endif /* Initialize the interface to xenstore. */ err = xs_init(); if (err) { pr_warn("Error initializing xenstore comms: %i\n", err); goto out_error; } #if defined(CONFIG_XEN) || defined(MODULE) /* Register ourselves with the kernel device subsystem */ if (!xenbus_frontend.error) { xenbus_frontend.error = device_register(&xenbus_frontend.dev); if (xenbus_frontend.error) { bus_unregister(&xenbus_frontend.bus); pr_warn("Error registering frontend device: %d\n", xenbus_frontend.error); } } xenbus_backend_device_register(); if (!is_initial_xendomain()) xenbus_probe(NULL); #endif #if defined(CONFIG_XEN_COMPAT_XENFS) && !defined(MODULE) /* * Create xenfs mountpoint in /proc for compatibility with * utilities that expect to find "xenbus" under "/proc/xen". */ proc_mkdir("xen", NULL); #endif return 0; out_error: /* * Do not unregister the xenbus front/backend buses here. The buses * must exist because front/backend drivers will use them when they are * registered. */ return err; } #ifndef MODULE postcore_initcall(xenbus_init); #ifdef CONFIG_XEN MODULE_LICENSE("Dual BSD/GPL"); #else MODULE_LICENSE("GPL"); #endif #endif #if defined(CONFIG_XEN) || defined(MODULE) static int is_device_connecting(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int exists_connecting_device(struct device_driver *drv) { if (xenbus_frontend.error) return xenbus_frontend.error; return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ pr_info("Device with no driver: %s\n", xendev->nodename); return 0; } if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); pr_warn("Timeout connecting to device: %s" " (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } xendrv = to_xenbus_driver(dev->driver); if (xendrv->is_ready && !xendrv->is_ready(xendev)) pr_warn("Device not ready: %s\n", xendev->nodename); return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. 
* * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !is_running_on_xen()) return; while (exists_connecting_device(drv)) { if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { if (!seconds_waited) pr_warn("Waiting for devices to initialise: "); seconds_waited += 5; pr_cont("%us...", 300 - seconds_waited); if (seconds_waited == 300) break; } schedule_timeout_interruptible(HZ/10); } if (seconds_waited) pr_cont("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } #ifndef MODULE static int __init boot_wait_for_devices(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; #endif if (!xenbus_frontend.error) { ready_to_wait_for_devices = 1; wait_for_devices(NULL); } return 0; } late_initcall(boot_wait_for_devices); #endif int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_frontend); #endif /* CONFIG_XEN || MODULE */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_probe.h000066400000000000000000000102401314037446600322270ustar00rootroot00000000000000/****************************************************************************** * xenbus_probe.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef _XENBUS_PROBE_H #define _XENBUS_PROBE_H #ifndef BUS_ID_SIZE #define XEN_BUS_ID_SIZE 20 #else #define XEN_BUS_ID_SIZE BUS_ID_SIZE #endif #ifdef CONFIG_PARAVIRT_XEN #define is_running_on_xen() xen_domain() #define is_initial_xendomain() xen_initial_domain() #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) #define dev_name(dev) ((dev)->bus_id) #endif #if IS_ENABLED(CONFIG_XEN_BACKEND) extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); extern void xenbus_backend_probe_and_watch(void); extern void xenbus_backend_bus_register(void); extern void xenbus_backend_device_register(void); #else static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} static inline void xenbus_backend_probe_and_watch(void) {} static inline void xenbus_backend_bus_register(void) {} static inline void xenbus_backend_device_register(void) {} #endif struct xen_bus_type { char *root; int error; unsigned int levels; int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); #if !defined(CONFIG_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H) void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, unsigned int len); #else struct device dev; #endif struct bus_type bus; }; enum xenstore_init { XS_UNKNOWN, XS_PV, XS_HVM, XS_LOCAL, }; extern struct device_attribute xenbus_dev_attrs[]; extern int xenbus_match(struct device *_dev, struct device_driver *_drv); extern int xenbus_dev_probe(struct device *_dev); extern int xenbus_dev_remove(struct device *_dev); extern int xenbus_register_driver_common(struct xenbus_driver *drv, struct xen_bus_type *bus); extern int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); extern int xenbus_probe_devices(struct xen_bus_type *bus); extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); extern void xenbus_dev_shutdown(struct device *_dev); extern int xenbus_dev_suspend(struct device *dev); extern int xenbus_dev_resume(struct device *dev); extern int xenbus_dev_cancel(struct device *dev); extern void xenbus_otherend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len, int ignore_on_shutdown); extern int xenbus_read_otherend_details(struct xenbus_device *xendev, char *id_node, char *path_node); void xenbus_ring_ops_init(void); #endif xenbus_probe_backend.c000066400000000000000000000211751314037446600336230ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/****************************************************************************** * Talks to Xen Store to figure out what devices we have (backend half). * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 Mike Wray, Hewlett-Packard * Copyright (C) 2005, 2006 XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DPRINTK(fmt, ...) \ pr_debug("(%s:%d) " fmt "\n", \ __func__, __LINE__, ##__VA_ARGS__) #include #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #ifdef CONFIG_XEN #include #include #endif #include #include "xenbus_comms.h" #include "xenbus_probe.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif /* backend/// => -- */ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { int domid, err; const char *devid, *type, *frontend; unsigned int typelen; type = strchr(nodename, '/'); if (!type) return -EINVAL; type++; typelen = strcspn(type, "/"); if (!typelen || type[typelen] != '/') return -EINVAL; devid = strrchr(nodename, '/') + 1; err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, "frontend", NULL, &frontend, NULL); if (err) return err; if (strlen(frontend) == 0) err = -ERANGE; if (!err && !xenbus_exists(XBT_NIL, frontend, "")) err = -ENOENT; kfree(frontend); if (err) return err; if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) return -ENOSPC; return 0; } static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env) { struct xenbus_device *xdev; struct xenbus_driver *drv; struct xen_bus_type *bus; DPRINTK(""); if (dev == NULL) return -ENODEV; xdev = to_xenbus_device(dev); bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype)) return -ENOMEM; /* stuff we want to pass to /sbin/hotplug */ if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) return -ENOMEM; if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) return -ENOMEM; if (dev->driver) { drv = to_xenbus_driver(dev->driver); if (drv && drv->uevent) return drv->uevent(xdev, env); } return 0; } /* backend/// */ static int xenbus_probe_backend_unit(struct xen_bus_type *bus, const char *dir, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, 
"%s/%s", dir, name); if (!nodename) return -ENOMEM; DPRINTK("%s\n", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } /* backend// */ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, const char *domid) { char *nodename; int err = 0; char **dir; unsigned int i, dir_n = 0; DPRINTK(""); nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); if (!nodename) return -ENOMEM; dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); if (IS_ERR(dir)) { kfree(nodename); return PTR_ERR(dir); } for (i = 0; i < dir_n; i++) { err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); if (err) break; } kfree(dir); kfree(nodename); return err; } #ifndef CONFIG_XEN static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 0); } #endif static struct xen_bus_type xenbus_backend = { .root = "backend", .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, #ifndef CONFIG_XEN .otherend_changed = frontend_changed, #else .dev = { .init_name = "xen-backend", }, #endif .error = -ENODEV, .bus = { .name = "xen-backend", .match = xenbus_match, .uevent = xenbus_uevent_backend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, #ifdef CONFIG_XEN .shutdown = xenbus_dev_shutdown, #endif .dev_attrs = xenbus_dev_attrs, }, }; static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); } static struct xenbus_watch be_watch = { .node = "backend", .callback = backend_changed, }; static int read_frontend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); } #ifndef CONFIG_XEN int xenbus_dev_is_online(struct xenbus_device *dev) { int rc, val; rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); if (rc != 1) val = 0; /* no online node present */ return val; } EXPORT_SYMBOL_GPL(xenbus_dev_is_online); #endif int xenbus_register_backend(struct xenbus_driver *drv) { drv->read_otherend_details = read_frontend_details; return xenbus_register_driver_common(drv, &xenbus_backend); } EXPORT_SYMBOL_GPL(xenbus_register_backend); #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP) void xenbus_backend_suspend(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); } #endif #ifndef CONFIG_XEN static int backend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) #else void xenbus_backend_probe_and_watch(void) #endif { /* Enumerate devices in xenstore and watch for changes. 
*/ xenbus_probe_devices(&xenbus_backend); register_xenbus_watch(&be_watch); #ifndef CONFIG_XEN return NOTIFY_DONE; #endif } #ifndef CONFIG_XEN static int __init xenbus_probe_backend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = backend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_backend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_backend_init); #else void __init xenbus_backend_bus_register(void) { xenbus_backend.error = bus_register(&xenbus_backend.bus); if (xenbus_backend.error) pr_warn("Error registering backend bus: %i\n", xenbus_backend.error); } void __init xenbus_backend_device_register(void) { if (xenbus_backend.error) return; xenbus_backend.error = device_register(&xenbus_backend.dev); if (xenbus_backend.error) { bus_unregister(&xenbus_backend.bus); pr_warn("Error registering backend device: %i\n", xenbus_backend.error); } } int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *)) { return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn); } EXPORT_SYMBOL_GPL(xenbus_for_each_backend); #endif xenbus_probe_frontend.c000066400000000000000000000321041314037446600340450ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DPRINTK(fmt, ...) \ pr_debug("(%s:%d) " fmt "\n", \ __func__, __LINE__, ##__VA_ARGS__) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_comms.h" #include "xenbus_probe.h" static struct workqueue_struct *xenbus_frontend_wq; /* device// => - */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { pr_warn("bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { pr_warn("bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device// */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; /* ignore console/0 */ if (!strncmp(type, "console", 7) && !strncmp(name, "0", 1)) { DPRINTK("Ignoring buggy device entry console/0"); return 0; } nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } static int xenbus_uevent_frontend(struct device *_dev, struct kobj_uevent_env *env) { struct xenbus_device *dev = to_xenbus_device(_dev); if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) return -ENOMEM; return 0; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 1); } static void xenbus_frontend_delayed_resume(struct work_struct *w) { struct xenbus_device *xdev = container_of(w, struct xenbus_device, work); xenbus_dev_resume(&xdev->dev); } static int xenbus_frontend_dev_resume(struct device *dev) { /* * If xenstored is running in this domain, we cannot access the backend * state at the moment, so we need to defer xenbus_dev_resume */ if (xen_store_domain_type == XS_LOCAL) { struct xenbus_device *xdev 
= to_xenbus_device(dev); if (!xenbus_frontend_wq) { pr_err("%s: no workqueue to process delayed resume\n", xdev->nodename); return -EFAULT; } queue_work(xenbus_frontend_wq, &xdev->work); return 0; } return xenbus_dev_resume(dev); } static int xenbus_frontend_dev_probe(struct device *dev) { if (xen_store_domain_type == XS_LOCAL) { struct xenbus_device *xdev = to_xenbus_device(dev); INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume); } return xenbus_dev_probe(dev); } static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = xenbus_frontend_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, }; static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/ */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .otherend_changed = backend_changed, .bus = { .name = "xen", .match = xenbus_match, .uevent = xenbus_uevent_frontend, .probe = xenbus_frontend_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .dev_attrs = xenbus_dev_attrs, .pm = &xenbus_pm_ops, }, }; static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (ignore_nonessential) { /* With older QEMU, for PVonHVM guests the guest config files * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] * which is nonsensical as there is no PV FB (there can be * a PVKB) running as HVM guest. */ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) return 0; if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) return 0; } xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); } static int non_essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, false); } static int exists_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, essential_device_connecting); } static int exists_non_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, non_essential_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? 
*/ pr_info("Device with no driver: %s\n", xendev->nodename); } else if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); pr_warn("Timeout connecting to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } return 0; } /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; static bool wait_loop(unsigned long start, unsigned int max_delay, unsigned int *seconds_waited) { if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { if (!*seconds_waited) pr_warn("Waiting for devices to initialise: "); *seconds_waited += 5; pr_cont("%us...", max_delay - *seconds_waited); if (*seconds_waited == max_delay) { pr_cont("\n"); return true; } } schedule_timeout_interruptible(HZ/10); return false; } /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? &xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !xen_domain()) return; while (exists_non_essential_connecting_device(drv)) if (wait_loop(start, 30, &seconds_waited)) break; /* Skips PVKB and PVFB check.*/ while (exists_essential_connecting_device(drv)) if (wait_loop(start, 270, &seconds_waited)) break; if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } int xenbus_register_frontend(struct xenbus_driver *drv) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(xenbus_register_frontend); static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq); static int backend_state; static void xenbus_reset_backend_state_changed(struct xenbus_watch *w, const char **v, unsigned int l) { xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state); printk(KERN_DEBUG "XENBUS: backend %s %s\n", v[XS_WATCH_PATH], xenbus_strstate(backend_state)); wake_up(&backend_state_wq); } static void xenbus_reset_wait_for_backend(char *be, int expected) { long timeout; timeout = wait_event_interruptible_timeout(backend_state_wq, backend_state == expected, 5 * HZ); if (timeout <= 0) pr_info("backend %s timed out\n", be); } /* * Reset frontend if it is in Connected or Closed state. * Wait for backend to catch up. * State Connected happens during kdump, Closed after kexec. 
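 *
 * A rough sketch of the handshake this helper drives (assuming the usual
 * xenbus state protocol; the paths and state values are taken from the
 * code below, not from a specification quoted here):
 *
 *   Connected -> write frontend "state" = Closing      -> wait for backend Closing
 *   Closing   -> write frontend "state" = Closed       -> wait for backend Closed
 *   Closed    -> write frontend "state" = Initialising -> wait for backend InitWait
 *
 * The switch statement falls through on purpose, so a device found in
 * Connected performs all three steps while one found in Closed only
 * performs the last.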
*/ static void xenbus_reset_frontend(char *fe, char *be, int be_state) { struct xenbus_watch be_watch; printk(KERN_DEBUG "XENBUS: backend %s %s\n", be, xenbus_strstate(be_state)); memset(&be_watch, 0, sizeof(be_watch)); be_watch.node = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/state", be); if (!be_watch.node) return; be_watch.callback = xenbus_reset_backend_state_changed; backend_state = XenbusStateUnknown; pr_info("triggering reconnect on %s\n", be); register_xenbus_watch(&be_watch); /* fall through to forward backend to state XenbusStateInitialising */ switch (be_state) { case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); xenbus_reset_wait_for_backend(be, XenbusStateInitWait); } unregister_xenbus_watch(&be_watch); pr_info("reconnect done on %s\n", be); kfree(be_watch.node); } static void xenbus_check_frontend(char *class, char *dev) { int be_state, fe_state, err; char *backend, *frontend; frontend = kasprintf(GFP_NOIO | __GFP_HIGH, "device/%s/%s", class, dev); if (!frontend) return; err = xenbus_scanf(XBT_NIL, frontend, "state", "%i", &fe_state); if (err != 1) goto out; switch (fe_state) { case XenbusStateConnected: case XenbusStateClosed: printk(KERN_DEBUG "XENBUS: frontend %s %s\n", frontend, xenbus_strstate(fe_state)); backend = xenbus_read(XBT_NIL, frontend, "backend", NULL); if (!backend || IS_ERR(backend)) goto out; err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state); if (err == 1) xenbus_reset_frontend(frontend, backend, be_state); kfree(backend); break; default: break; } out: kfree(frontend); } static void xenbus_reset_state(void) { char **devclass, **dev; int devclass_n, dev_n; int i, j; devclass = xenbus_directory(XBT_NIL, "device", "", &devclass_n); if (IS_ERR(devclass)) return; for (i = 0; i < devclass_n; i++) { dev = xenbus_directory(XBT_NIL, "device", devclass[i], &dev_n); if (IS_ERR(dev)) continue; for (j = 0; j < dev_n; j++) xenbus_check_frontend(devclass[i], dev[j]); kfree(dev); } kfree(devclass); } static int frontend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) { /* reset devices in Connected or Closed state */ if (xen_hvm_domain()) xenbus_reset_state(); /* Enumerate devices in xenstore and watch for changes. 
*/ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); return NOTIFY_DONE; } static int __init xenbus_probe_frontend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = frontend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_frontend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); if (xen_store_domain_type == XS_LOCAL) { xenbus_frontend_wq = create_workqueue("xenbus_frontend"); if (!xenbus_frontend_wq) pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n"); } return 0; } subsys_initcall(xenbus_probe_frontend_init); #ifndef MODULE static int __init boot_wait_for_devices(void) { if (!xen_has_pv_devices()) return -ENODEV; ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; } late_initcall(boot_wait_for_devices); #endif MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-platform-pci/xenbus_xs.c000066400000000000000000000603751314037446600315630ustar00rootroot00000000000000/****************************************************************************** * xenbus_xs.c * * This is the kernel equivalent of the "xs" library. We don't need everything * and we use xenbus_comms for communication. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef CONFIG_XEN #include #endif #include #include #include "xenbus_comms.h" #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */ #define PF_NOFREEZE 0 #endif #define XS_LINUX_WEAK_WRITE 128 struct xs_stored_msg { struct list_head list; struct xsd_sockmsg hdr; union { /* Queued replies. */ struct { char *body; } reply; /* Queued watch events. */ struct { struct xenbus_watch *handle; char **vec; unsigned int vec_size; } watch; } u; }; struct xs_handle { /* A list of replies. Currently only one will ever be outstanding. 
*/ struct list_head reply_list; spinlock_t reply_lock; wait_queue_head_t reply_waitq; /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. * * transaction_mutex must be held before incrementing * transaction_count. The mutex is held when a suspend is in * progress to prevent new transactions starting. * * When decrementing transaction_count to zero the wait queue * should be woken up, the suspend code waits for count to * reach zero. */ /* One request at a time. */ struct mutex request_mutex; /* Protect xenbus reader thread against save/restore. */ struct mutex response_mutex; /* Protect transactions against save/restore. */ struct mutex transaction_mutex; atomic_t transaction_count; wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. */ struct rw_semaphore watch_mutex; }; static struct xs_handle xs_state; /* List of registered watches, and a lock to protect it. */ static LIST_HEAD(watches); static DEFINE_SPINLOCK(watches_lock); /* List of pending watch callback events, and a lock to protect it. */ static LIST_HEAD(watch_events); static DEFINE_SPINLOCK(watch_events_lock); /* * Details of the xenwatch callback kernel thread. The thread waits on the * watch_events_waitq for work to do (queued on watch_events list). When it * wakes up it acquires the xenwatch_mutex before reading the list and * carrying out work. */ static pid_t xenwatch_pid; /* static */ DEFINE_MUTEX(xenwatch_mutex); static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq); static int get_error(const char *errorstring) { unsigned int i; for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) { if (i == ARRAY_SIZE(xsd_errors) - 1) { pr_warn("xen store gave: unknown error %s\n", errorstring); return EINVAL; } } return xsd_errors[i].errnum; } #ifdef OPENSUSE_1302 static bool xenbus_ok(void) { #if !defined(CONFIG_XEN) && !defined(MODULE) switch (xen_store_domain_type) { case XS_LOCAL: #else if (atomic_read(&xenbus_xsd_state) == XENBUS_XSD_LOCAL_READY) #endif switch (system_state) { case SYSTEM_POWER_OFF: case SYSTEM_RESTART: case SYSTEM_HALT: return false; default: break; } return true; #if !defined(CONFIG_XEN) && !defined(MODULE) case XS_PV: case XS_HVM: /* FIXME: Could check that the remote domain is alive, * but it is normally initial domain. */ return true; default: break; } return false; #endif } #endif static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) { struct xs_stored_msg *msg; char *body; spin_lock(&xs_state.reply_lock); while (list_empty(&xs_state.reply_list)) { spin_unlock(&xs_state.reply_lock); #ifdef OPENSUSE_1302 if (xenbus_ok()) /* XXX FIXME: Avoid synchronous wait for response here. */ wait_event_timeout(xs_state.reply_waitq, !list_empty(&xs_state.reply_list), msecs_to_jiffies(500)); else { /* * If we are in the process of being shut-down there is * no point of trying to contact XenBus - it is either * killed (xenstored application) or the other domain * has been killed or is unreachable. */ return ERR_PTR(-EIO); } #else /* XXX FIXME: Avoid synchronous wait for response here. 
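 * (For context: the xenbus reader thread queues each reply on
 * xs_state.reply_list and wakes reply_waitq, so the wait below returns
 * as soon as the single outstanding request has been answered.)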
*/ wait_event(xs_state.reply_waitq, !list_empty(&xs_state.reply_list)); #endif spin_lock(&xs_state.reply_lock); } msg = list_entry(xs_state.reply_list.next, struct xs_stored_msg, list); list_del(&msg->list); spin_unlock(&xs_state.reply_lock); *type = msg->hdr.type; if (len) *len = msg->hdr.len; body = msg->u.reply.body; kfree(msg); return body; } static void transaction_start(void) { mutex_lock(&xs_state.transaction_mutex); atomic_inc(&xs_state.transaction_count); mutex_unlock(&xs_state.transaction_mutex); } static void transaction_end(void) { if (atomic_dec_and_test(&xs_state.transaction_count)) wake_up(&xs_state.transaction_wq); } #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) static void transaction_suspend(void) { mutex_lock(&xs_state.transaction_mutex); wait_event(xs_state.transaction_wq, atomic_read(&xs_state.transaction_count) == 0); } static void transaction_resume(void) { mutex_unlock(&xs_state.transaction_mutex); } #endif void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; enum xsd_sockmsg_type type = msg->type; int err; if (type == XS_TRANSACTION_START) transaction_start(); mutex_lock(&xs_state.request_mutex); err = xb_write(msg, sizeof(*msg) + msg->len); if (err) { msg->type = XS_ERROR; ret = ERR_PTR(err); } else ret = read_reply(&msg->type, &msg->len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if ((type == XS_TRANSACTION_END) || ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) transaction_end(); return ret; } #if !defined(CONFIG_XEN) && !defined(MODULE) EXPORT_SYMBOL(xenbus_dev_request_and_reply); #endif /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, enum xsd_sockmsg_type type, const struct kvec *iovec, unsigned int num_vecs, unsigned int *len) { struct xsd_sockmsg msg; void *ret = NULL; unsigned int i; int err; msg.tx_id = t.id; msg.req_id = 0; msg.type = type; msg.len = 0; for (i = 0; i < num_vecs; i++) msg.len += iovec[i].iov_len; mutex_lock(&xs_state.request_mutex); err = xb_write(&msg, sizeof(msg)); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } for (i = 0; i < num_vecs; i++) { err = xb_write(iovec[i].iov_base, iovec[i].iov_len); if (err) { mutex_unlock(&xs_state.request_mutex); return ERR_PTR(err); } } ret = read_reply(&msg.type, len); mutex_unlock(&xs_state.request_mutex); if (IS_ERR(ret)) return ret; if (msg.type == XS_ERROR) { err = get_error(ret); kfree(ret); return ERR_PTR(-err); } if (msg.type != type && type != XS_LINUX_WEAK_WRITE) { pr_warn_ratelimited("unexpected type [%d], expected [%d]\n", msg.type, type); kfree(ret); return ERR_PTR(-EINVAL); } return ret; } /* Simplified version of xs_talkv: single message. */ static void *xs_single(struct xenbus_transaction t, enum xsd_sockmsg_type type, const char *string, unsigned int *len) { struct kvec iovec; iovec.iov_base = (void *)string; iovec.iov_len = strlen(string) + 1; return xs_talkv(t, type, &iovec, 1, len); } /* Many commands only need an ack, don't care what it says. */ static int xs_error(char *reply) { if (IS_ERR(reply)) return PTR_ERR(reply); kfree(reply); return 0; } static unsigned int count_strings(const char *strings, unsigned int len) { unsigned int num; const char *p; for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1) num++; return num; } /* Return the path to dir with /name appended. Buffer must be kfree()'ed. 
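 *
 * A small usage sketch (hypothetical values, only to illustrate the
 * contract of join()):
 *
 *   char *p = join("device/vbd", "768");   yields "device/vbd/768"
 *   char *q = join("device/vbd", "");      yields "device/vbd"
 *
 * Both results must be released with kfree(), and ERR_PTR(-ENOMEM) is
 * returned if the allocation fails.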
*/ static char *join(const char *dir, const char *name) { char *buffer; if (strlen(name) == 0) buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir); else buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name); return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; } static char **split(char *strings, unsigned int len, unsigned int *num) { char *p, **ret; /* Count the strings. */ *num = count_strings(strings, len) + 1; /* Transfer to one big alloc for easy freeing. */ ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH); if (!ret) { kfree(strings); return ERR_PTR(-ENOMEM); } memcpy(&ret[*num], strings, len); kfree(strings); strings = (char *)&ret[*num]; for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1) ret[(*num)++] = p; ret[*num] = strings + len; return ret; } char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num) { char *strings, *path; unsigned int len; path = join(dir, node); if (IS_ERR(path)) return (char **)path; strings = xs_single(t, XS_DIRECTORY, path, &len); kfree(path); if (IS_ERR(strings)) return (char **)strings; return split(strings, len, num); } EXPORT_SYMBOL_GPL(xenbus_directory); /* Check if a path exists. Return 1 if it does. */ int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node) { char **d; int dir_n; d = xenbus_directory(t, dir, node, &dir_n); if (IS_ERR(d)) return 0; kfree(d); return 1; } EXPORT_SYMBOL_GPL(xenbus_exists); /* Get the value of a single file. * Returns a kmalloced value: call free() on it after use. * len indicates length in bytes. */ void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len) { char *path; void *ret; path = join(dir, node); if (IS_ERR(path)) return (void *)path; ret = xs_single(t, XS_READ, path, len); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_read); /* Write the value of a single file. * Returns -err on failure. */ int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string) { const char *path; struct kvec iovec[2]; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); iovec[0].iov_base = (void *)path; iovec[0].iov_len = strlen(path) + 1; iovec[1].iov_base = (void *)string; iovec[1].iov_len = strlen(string); ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_write); /* Create a new directory. */ int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_mkdir); /* Destroy a file or directory (directories must be empty). */ int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) { char *path; int ret; path = join(dir, node); if (IS_ERR(path)) return PTR_ERR(path); ret = xs_error(xs_single(t, XS_RM, path, NULL)); kfree(path); return ret; } EXPORT_SYMBOL_GPL(xenbus_rm); /* Start a transaction: changes by others will not be seen during this * transaction, and changes will not be visible to others until end. 
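 *
 * A minimal caller sketch (hypothetical, following the retry-on-EAGAIN
 * convention used by xenbus drivers; "dev" stands for an imaginary
 * struct xenbus_device):
 *
 *   struct xenbus_transaction xbt;
 *   int err;
 * again:
 *   err = xenbus_transaction_start(&xbt);
 *   if (err)
 *           return err;
 *   err = xenbus_printf(xbt, dev->nodename, "state", "%d", 1);
 *   if (err) {
 *           xenbus_transaction_end(xbt, 1);    (abort)
 *           return err;
 *   }
 *   err = xenbus_transaction_end(xbt, 0);      (commit)
 *   if (err == -EAGAIN)
 *           goto again;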
*/ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { transaction_end(); return PTR_ERR(id_str); } t->id = simple_strtoul(id_str, NULL, 0); kfree(id_str); return 0; } EXPORT_SYMBOL_GPL(xenbus_transaction_start); /* End a transaction. * If abandon is true, transaction is discarded instead of committed. */ int xenbus_transaction_end(struct xenbus_transaction t, int abort) { char abortstr[2]; int err; if (abort) strcpy(abortstr, "F"); else strcpy(abortstr, "T"); err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); transaction_end(); return err; } EXPORT_SYMBOL_GPL(xenbus_transaction_end); /* Single read and scanf: returns -errno or num scanned. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *val; val = xenbus_read(t, dir, node, NULL); if (IS_ERR(val)) return PTR_ERR(val); va_start(ap, fmt); ret = vsscanf(val, fmt, ap); va_end(ap); kfree(val); /* Distinctive errno. */ if (ret == 0) return -ERANGE; return ret; } EXPORT_SYMBOL_GPL(xenbus_scanf); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) { va_list ap; int ret; char *printf_buffer; va_start(ap, fmt); printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap); va_end(ap); if (!printf_buffer) return -ENOMEM; ret = xenbus_write(t, dir, node, printf_buffer); kfree(printf_buffer); return ret; } EXPORT_SYMBOL_GPL(xenbus_printf); /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) { va_list ap; const char *name; int ret = 0; va_start(ap, dir); while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { const char *fmt = va_arg(ap, char *); void *result = va_arg(ap, void *); char *p; p = xenbus_read(t, dir, name, NULL); if (IS_ERR(p)) { ret = PTR_ERR(p); break; } if (fmt) { if (sscanf(p, fmt, result) == 0) ret = -EINVAL; kfree(p); } else *(char **)result = p; } va_end(ap); return ret; } EXPORT_SYMBOL_GPL(xenbus_gather); static int xs_watch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (void *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (void *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, ARRAY_SIZE(iov), NULL)); } static int xs_unwatch(const char *path, const char *token) { struct kvec iov[2]; iov[0].iov_base = (char *)path; iov[0].iov_len = strlen(path) + 1; iov[1].iov_base = (char *)token; iov[1].iov_len = strlen(token) + 1; return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL)); } static struct xenbus_watch *find_watch(const char *token) { struct xenbus_watch *i, *cmp; cmp = (void *)simple_strtoul(token, NULL, 16); list_for_each_entry(i, &watches, list) if (i == cmp) return i; return NULL; } /* * Certain older XenBus toolstack cannot handle reading values that are * not populated. Some Xen 3.4 installation are incapable of doing this * so if we are running on anything older than 4 do not attempt to read * control/platform-feature-xs_reset_watches. 
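 * The check below uses the Xen CPUID interface: leaf (base + 1) returns
 * the hypervisor version in EAX with the major number in the upper 16
 * bits and the minor in the lower 16, so "(eax >> 16) < 4" reads as
 * "running on a Xen release older than 4.x".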
*/ static inline bool xen_strict_xenbus_quirk(void) { #if !defined(CONFIG_XEN) && defined(CONFIG_X86) uint32_t eax, ebx, ecx, edx, base; base = xen_cpuid_base(); cpuid(base + 1, &eax, &ebx, &ecx, &edx); if ((eax >> 16) < 4) return true; #endif return false; } static void xs_reset_watches(void) { #if defined(CONFIG_PARAVIRT_XEN) || defined(MODULE) int err, supported = 0; #ifdef CONFIG_PARAVIRT_XEN if (!xen_hvm_domain() || xen_initial_domain()) return; #endif if (xen_strict_xenbus_quirk()) return; err = xenbus_scanf(XBT_NIL, "control", "platform-feature-xs_reset_watches", "%d", &supported); if (err != 1 || !supported) return; err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL)); if (err && err != -EEXIST) pr_warn("xs_reset_watches failed: %d\n", err); #endif } /* Register callback to watch this node. */ int register_xenbus_watch(struct xenbus_watch *watch) { /* Pointer in ascii is the token. */ char token[sizeof(watch) * 2 + 1]; int err; sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(find_watch(token)); list_add(&watch->list, &watches); spin_unlock(&watches_lock); err = xs_watch(watch->node, token); if (err) { spin_lock(&watches_lock); list_del(&watch->list); spin_unlock(&watches_lock); } up_read(&xs_state.watch_mutex); return err; } EXPORT_SYMBOL_GPL(register_xenbus_watch); void unregister_xenbus_watch(struct xenbus_watch *watch) { struct xs_stored_msg *msg, *tmp; char token[sizeof(watch) * 2 + 1]; int err; #if defined(CONFIG_XEN) || defined(MODULE) BUG_ON(watch->flags & XBWF_new_thread); #endif sprintf(token, "%lX", (long)watch); down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); BUG_ON(!find_watch(token)); list_del(&watch->list); spin_unlock(&watches_lock); err = xs_unwatch(watch->node, token); if (err) pr_warn("Failed to release watch %s: %i\n", watch->node, err); up_read(&xs_state.watch_mutex); /* Make sure there are no callbacks running currently (unless its us) */ if (current->pid != xenwatch_pid) mutex_lock(&xenwatch_mutex); /* Cancel pending watch events. */ spin_lock(&watch_events_lock); list_for_each_entry_safe(msg, tmp, &watch_events, list) { if (msg->u.watch.handle != watch) continue; list_del(&msg->list); kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watch_events_lock); if (current->pid != xenwatch_pid) mutex_unlock(&xenwatch_mutex); } EXPORT_SYMBOL_GPL(unregister_xenbus_watch); #if !defined(CONFIG_XEN) || defined(CONFIG_PM_SLEEP) void xs_suspend(void) { transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); } void xs_resume(void) { struct xenbus_watch *watch; char token[sizeof(watch) * 2 + 1]; #if !defined(CONFIG_XEN) && !defined(MODULE) xb_init_comms(); #endif mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. 
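 * (xs_suspend() took watch_mutex for writing, so no watch can be added
 * to or removed from the list until up_write() below; the spinlock only
 * matters when registration can run concurrently.)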
*/ list_for_each_entry(watch, &watches, list) { sprintf(token, "%lX", (long)watch); xs_watch(watch->node, token); } up_write(&xs_state.watch_mutex); } void xs_suspend_cancel(void) { mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); mutex_unlock(&xs_state.transaction_mutex); } #endif #if defined(CONFIG_XEN) || defined(MODULE) static int xenwatch_handle_callback(void *data) { struct xs_stored_msg *msg = data; msg->u.watch.handle->callback(msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); kfree(msg->u.watch.vec); kfree(msg); /* Kill this kthread if we were spawned just for this callback. */ if (current->pid != xenwatch_pid) do_exit(0); return 0; } #endif static int xenwatch_thread(void *unused) { struct list_head *ent; struct xs_stored_msg *msg; current->flags |= PF_NOFREEZE; for (;;) { #ifdef OPENSUSE_1302 wait_event_interruptible(watch_events_waitq, !list_empty(&watch_events)); #else wait_event_interruptible(watch_events_waitq, (kgr_task_safe(current), !list_empty(&watch_events))); #endif if (kthread_should_stop()) break; mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); ent = watch_events.next; if (ent != &watch_events) list_del(ent); spin_unlock(&watch_events_lock); if (ent == &watch_events) { mutex_unlock(&xenwatch_mutex); continue; } msg = list_entry(ent, struct xs_stored_msg, list); #if defined(CONFIG_XEN) || defined(MODULE) /* * Unlock the mutex before running an XBWF_new_thread * handler. kthread_run can block which can deadlock * against unregister_xenbus_watch() if we need to * unregister other watches in order to make * progress. This can occur on resume before the swap * device is attached. */ if (msg->u.watch.handle->flags & XBWF_new_thread) { mutex_unlock(&xenwatch_mutex); kthread_run(xenwatch_handle_callback, msg, "xenwatch_cb"); } else { xenwatch_handle_callback(msg); mutex_unlock(&xenwatch_mutex); } #else msg->u.watch.handle->callback( msg->u.watch.handle, (const char **)msg->u.watch.vec, msg->u.watch.vec_size); mutex_unlock(&xenwatch_mutex); kfree(msg->u.watch.vec); kfree(msg); #endif } return 0; } static int process_msg(void) { struct xs_stored_msg *msg; char *body; int err; /* * We must disallow save/restore while reading a xenstore message. * A partial read across s/r leaves us out of sync with xenstored. */ for (;;) { err = xb_wait_for_data_to_read(); if (err) return err; mutex_lock(&xs_state.response_mutex); if (xb_data_to_read()) break; /* We raced with save/restore: pending data 'disappeared'. 
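 * Dropping response_mutex and looping back to the wait is safe here:
 * after a restore the ring is reinitialised, so the bytes observed
 * before taking the mutex may legitimately have vanished and the loop
 * simply waits again.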
*/ mutex_unlock(&xs_state.response_mutex); } msg = kmalloc(sizeof(*msg), GFP_NOIO | __GFP_HIGH); if (msg == NULL) { err = -ENOMEM; goto out; } err = xb_read(&msg->hdr, sizeof(msg->hdr)); if (err) { kfree(msg); goto out; } if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) { kfree(msg); err = -EINVAL; goto out; } body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH); if (body == NULL) { kfree(msg); err = -ENOMEM; goto out; } err = xb_read(body, msg->hdr.len); if (err) { kfree(body); kfree(msg); goto out; } body[msg->hdr.len] = '\0'; if (msg->hdr.type == XS_WATCH_EVENT) { msg->u.watch.vec = split(body, msg->hdr.len, &msg->u.watch.vec_size); if (IS_ERR(msg->u.watch.vec)) { err = PTR_ERR(msg->u.watch.vec); kfree(msg); goto out; } spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { kfree(msg->u.watch.vec); kfree(msg); } spin_unlock(&watches_lock); } else { msg->u.reply.body = body; spin_lock(&xs_state.reply_lock); list_add_tail(&msg->list, &xs_state.reply_list); spin_unlock(&xs_state.reply_lock); wake_up(&xs_state.reply_waitq); } out: mutex_unlock(&xs_state.response_mutex); return err; } static int xenbus_thread(void *unused) { int err; current->flags |= PF_NOFREEZE; for (;;) { err = process_msg(); if (err) pr_warn("error %d while reading message\n", err); if (kthread_should_stop()) break; } return 0; } int #ifndef MODULE __init #endif xs_init(void) { struct task_struct *task; INIT_LIST_HEAD(&xs_state.reply_list); spin_lock_init(&xs_state.reply_lock); init_waitqueue_head(&xs_state.reply_waitq); mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); atomic_set(&xs_state.transaction_count, 0); init_waitqueue_head(&xs_state.transaction_wq); task = kthread_run(xenwatch_thread, NULL, "xenwatch"); if (IS_ERR(task)) return PTR_ERR(task); xenwatch_pid = task->pid; task = kthread_run(xenbus_thread, NULL, "xenbus"); if (IS_ERR(task)) return PTR_ERR(task); /* shutdown watches for kexec boot */ xs_reset_watches(); return 0; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/000077500000000000000000000000001314037446600257325ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/Kbuild000066400000000000000000000001241314037446600270640ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-scsi.o xen-scsi-objs := scsifront.o xenbus.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/Makefile000066400000000000000000000000661314037446600273740ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/common.h000066400000000000000000000115441314037446600274000ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), 
to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../include/xen/interface/io/vscsiif.h" #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 #define DMA_VSCSI_INDIRECT 4 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 128 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define VSCSIIF_MAX_RING_ORDER 3U #define VSCSIIF_MAX_RING_PAGES (1U << VSCSIIF_MAX_RING_ORDER) #define VSCSIIF_MAX_RING_PAGE_SIZE (VSCSIIF_MAX_RING_PAGES * PAGE_SIZE) #define VSCSIIF_MAX_RING_SIZE __CONST_RING_SIZE(vscsiif, VSCSIIF_MAX_RING_PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_MAX_RING_SIZE #define VSCSIIF_STATE_DISCONNECT 0 #define VSCSIIF_STATE_CONNECTED 1 #define VSCSIIF_STATE_SUSPENDED 2 struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct vscsifrnt_shadow { uint16_t next_free; #define VSCSIIF_IN_USE 0x0fff #define VSCSIIF_NONE 0x0ffe /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* Number of pieces of scatter-gather */ unsigned int nr_segments; uint8_t sc_data_direction; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* requested struct scsi_cmnd is stored from kernel */ struct scsi_cmnd *sc; struct grant **grants_used; struct grant **indirect_grants; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref[VSCSIIF_MAX_RING_PAGES]; struct vscsiif_front_ring ring; unsigned int ring_size; struct vscsiif_response ring_res; struct list_head grants; struct list_head indirect_pages; unsigned int max_indirect_segments; unsigned int persistent_gnts_c; unsigned int feature_persistent; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; wait_queue_head_t wq_sync; wait_queue_head_t wq_pause; unsigned char waiting_resp; unsigned char waiting_sync; unsigned char waiting_pause; unsigned char pause; unsigned callers; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); void scsifront_finish_all(struct vscsifrnt_info *info); irqreturn_t scsifront_intr(int irq, void *dev_id); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/scsifront.c000066400000000000000000000527211314037446600301170ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "common.h" #include #define PREFIX(lvl) KERN_##lvl "scsifront: " static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free >= VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = VSCSIIF_IN_USE; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void _add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { info->shadow[id].next_free = info->shadow_free; info->shadow[id].sc = NULL; info->shadow_free = id; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); _add_id_to_freelist(info, id); spin_unlock_irqrestore(&info->shadow_lock, flags); } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct vscsifrnt_info *info, int write) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->dev->otherend_id, buffer_mfn, write); return gnt_list_entry; } struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); ring->req_prod_pvt++; id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) { struct vscsifrnt_shadow *s = &info->shadow[id]; struct scsi_cmnd *sc = s->sc; int i = 0; int nseg = s->nr_segments; int read = (s->sc->sc_data_direction == DMA_FROM_DEVICE); unsigned int off, len, bytes; unsigned int data_len = scsi_bufflen(sc); char *sg_data = NULL; void *shared_data = NULL; if (s->sc->sc_data_direction == DMA_NONE) return; if (read && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. 
*/ struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(s->grants_used[i]->pfn)); sg_data = kmap_atomic(sg_page(sg)); memcpy(sg_data + sg->offset, shared_data + sg->offset, sg->length); kunmap_atomic(sg_data); kunmap_atomic(shared_data); len -= bytes; data_len -= bytes; off = 0; } } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) { printk(KERN_WARNING "backend has not unmapped grant: %u\n", s->grants_used[i]->gref); shost_printk(PREFIX(ALERT), info->host, "grant still in use by backend\n"); BUG(); } list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->sc_data_direction == DMA_VSCSI_INDIRECT) { for (i = 0; i < VSCSI_INDIRECT_PAGES(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backend has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs.
*/ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = info->shadow[id].sc; if (sc == NULL) BUG(); scsifront_gnttab_done(info, id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; switch (info->shadow[id].rslt_reset) { case 0: info->shadow[id].rslt_reset = ring_res->rslt; break; case -1: _add_id_to_freelist(info, id); break; default: shost_printk(PREFIX(ERR), info->host, "bad reset state %d, possibly leaking %u\n", info->shadow[id].rslt_reset, id); break; } spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } static void scsifront_do_response(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } static int scsifront_ring_drain(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); scsifront_do_response(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); } else { info->ring.sring->rsp_event = i + 1; } return more_to_do; } static int scsifront_cmd_done(struct vscsifrnt_info *info) { int more_to_do; unsigned long flags; spin_lock_irqsave(info->host->host_lock, flags); more_to_do = scsifront_ring_drain(info); info->waiting_sync = 0; spin_unlock_irqrestore(info->host->host_lock, flags); wake_up(&info->wq_sync); /* Yield point for this unbounded loop. 
*/ cond_resched(); return more_to_do; } void scsifront_finish_all(struct vscsifrnt_info *info) { unsigned i; struct vscsiif_response resp; scsifront_ring_drain(info); for (i = 0; i < VSCSIIF_MAX_REQS; i++) { if (info->shadow[i].next_free != VSCSIIF_IN_USE) continue; resp.rqid = i; resp.sense_len = 0; resp.rslt = DID_RESET << 16; resp.residual_len = 0; scsifront_do_response(info, &resp); } } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } static int scsifront_calculate_segs(struct scsi_cmnd *sc) { unsigned int i, off, len, bytes; int ref_cnt = 0; unsigned int data_len = scsi_bufflen(sc); struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } return ref_cnt; } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id) { grant_ref_t gref_head; struct page *page; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned int data_len = scsi_bufflen(sc); int nseg, max_grefs, n; struct grant *gnt_list_entry = NULL; struct scsiif_request_segment_aligned *segments = NULL; bool new_persistent_gnts; if (sc->sc_data_direction == DMA_NONE || !data_len) { ring_req->u.rw.nr_segments = (uint8_t)ref_cnt; return 0; } max_grefs = info->max_indirect_segments ? info->max_indirect_segments + VSCSI_INDIRECT_PAGES(info->max_indirect_segments) : VSCSIIF_SG_TABLESIZE; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; err = gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head); if (err) { shost_printk(PREFIX(ERR), info->host, "gnttab_alloc_grant_references() error\n"); return -ENOMEM; } } else new_persistent_gnts = 0; nseg = scsifront_calculate_segs(sc); if (nseg > VSCSIIF_SG_TABLESIZE) { ring_req->sc_data_direction = DMA_VSCSI_INDIRECT; ring_req->u.indirect.indirect_op = (uint8_t)sc->sc_data_direction; ring_req->u.indirect.nr_segments = (uint16_t)nseg; info->shadow[id].sc_data_direction = DMA_VSCSI_INDIRECT; } else { ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->u.rw.nr_segments = (uint8_t)nseg; info->shadow[id].sc_data_direction = (uint8_t)sc->sc_data_direction; } /* quoted scsi_lib.c/scsi_req_map_sg . 
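 * (Worked example, added for illustration and not part of the original
 * comment: nr_pages below bounds the grant count by the page span of the
 * transfer.  With 4 KiB pages, data_len = 9216 bytes starting at offset
 * 2048 in the first sg page gives PFN_UP(9216 + 2048) = PFN_UP(11264) = 3
 * pages.  The per-segment loop then clamps each chunk with
 * bytes = min_t(unsigned int, len, PAGE_SIZE - off) followed by
 * min(bytes, data_len), so no single grant ever spans a page boundary.)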
*/ nr_pages = PFN_UP(data_len + scsi_sglist(sc)->offset); if (nr_pages > max_grefs) { shost_printk(PREFIX(ERR), info->host, "Unable to map request_buffer for command!\n"); ref_cnt = -E2BIG; } else { struct scatterlist *sg, *sgl = scsi_sglist(sc); for_each_sg (sgl, sg, scsi_sg_count(sc), i) { page = sg_page(sg); off = sg->offset; len = sg->length; while (len > 0 && data_len > 0) { /* map indirect page */ if ((ring_req->sc_data_direction == DMA_VSCSI_INDIRECT) && (ref_cnt % SEGS_PER_VSCSI_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments); n = ref_cnt / SEGS_PER_VSCSI_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info, 0); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info, write); ref = gnt_list_entry->gref; info->shadow[id].grants_used[ref_cnt] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (write && info->feature_persistent) { char *sg_data = NULL; void *shared_data = NULL; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); sg_data = kmap_atomic(sg_page(sg)); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. 
It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, sg_data + sg->offset, sg->length); kunmap_atomic(sg_data); kunmap_atomic(shared_data); } if (ring_req->sc_data_direction != DMA_VSCSI_INDIRECT) { ring_req->u.rw.seg[ref_cnt].gref = ref; ring_req->u.rw.seg[ref_cnt].offset = (uint16_t)off; ring_req->u.rw.seg[ref_cnt].length = (uint16_t)bytes; } else { n = ref_cnt % SEGS_PER_VSCSI_INDIRECT_FRAME; segments[n] = (struct scsiif_request_segment_aligned) { .gref = ref, .offset = (uint16_t)off, .length = (uint16_t)bytes }; } page++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } if (segments) kunmap_atomic(segments); /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return ref_cnt; } static int scsifront_enter(struct vscsifrnt_info *info) { if (info->pause) return 1; info->callers++; return 0; } static void scsifront_return(struct vscsifrnt_info *info) { info->callers--; if (info->callers) return; if (!info->waiting_pause) return; info->waiting_pause = 0; wake_up(&info->wq_pause); } static int scsifront_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct vscsifrnt_info *info = shost_priv(shost); vscsiif_request_t *ring_req; unsigned long flags; int ref_cnt; uint16_t rqid; if (scsifront_enter(info)) return SCSI_MLQUEUE_HOST_BUSY; /* debug printk to identify more missing scsi commands shost_printk(KERN_INFO "scsicmd: ", sc->device->host, "len=%u %#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x,%#x\n", sc->cmd_len, sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], sc->cmnd[4], sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9]); */ spin_lock_irqsave(shost->host_lock, flags); scsi_cmd_get_serial(shost, sc); if (RING_FULL(&info->ring)) { scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; } sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); info->shadow[rqid].sc = sc; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_CDB; info->shadow[rqid].sc_data_direction = (uint8_t)sc->sc_data_direction; ref_cnt = map_data_for_request(info, sc, ring_req, rqid); if (ref_cnt < 0) { add_id_to_freelist(info, rqid); info->ring.req_prod_pvt--; scsifront_do_request(info); scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); if (ref_cnt == (-ENOMEM)) return SCSI_MLQUEUE_HOST_BUSY; sc->result = (DID_ERROR << 16); sc->scsi_done(sc); return 0; } info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); scsifront_return(info); spin_unlock_irqrestore(shost->host_lock, flags); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = shost_priv(host); vscsiif_request_t *ring_req; uint16_t rqid; int err = 0; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif while (RING_FULL(&info->ring)) { if (err 
|| info->pause) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return FAILED; } info->waiting_sync = 1; spin_unlock_irq(host->host_lock); err = wait_event_interruptible(info->wq_sync, !info->waiting_sync); spin_lock_irq(host->host_lock); } if (scsifront_enter(info)) { spin_unlock_irq(host->host_lock); return FAILED; } ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; info->shadow[rqid].rslt_reset = 0; ring_req->channel = sc->device->channel; ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->cmd_len = sc->cmd_len; if ( sc->cmd_len ) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); ring_req->u.rw.nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); err = wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); if (!err) { err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); } else { spin_lock(&info->shadow_lock); info->shadow[rqid].rslt_reset = -1; spin_unlock(&info->shadow_lock); err = FAILED; } scsifront_return(info); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!is_running_on_xen()) return -ENODEV; err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-scsi/xenbus.c000066400000000000000000000520501314037446600274040ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /* * Patched to support >2TB drives * 2010, Samuel Kvasnica, IMS Nanofabrication AG */ #include "common.h" #include #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif static unsigned int max_nr_segs = VSCSIIF_SG_TABLESIZE; module_param_named(max_segs, max_nr_segs, uint, 0); MODULE_PARM_DESC(max_segs, "Maximum number of segments per request"); extern struct scsi_host_template scsifront_sht; static int fill_grant_buffer(struct vscsifrnt_info *info, int num) { struct grant *gnt_list_entry, *n; struct page *granted_page; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. */ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static void scsifront_free_ring(struct vscsifrnt_info *info) { int i; for (i = 0; i < info->ring_size; i++) { if (info->ring_ref[i] != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref[i], 0UL); info->ring_ref[i] = GRANT_INVALID_REF; } } free_pages((unsigned long)info->ring.sring, get_order(VSCSIIF_MAX_RING_PAGE_SIZE)); info->ring.sring = NULL; if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } void scsifront_resume_free(struct vscsifrnt_info *info) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].sc) goto free_shadow; segs = info->shadow[i].nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); 
if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].sc_data_direction != DMA_VSCSI_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < VSCSI_INDIRECT_PAGES(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } scsifront_free_ring(info); } static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } scsifront_resume_free(info); scsi_host_put(info->host); } static void shadow_init(struct vscsifrnt_shadow * shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); for (i = 0; i < ring_size; i++) { shadow[i].next_free = i + 1; init_waitqueue_head(&(shadow[i].wq_reset)); shadow[i].wait_reset = 0; } shadow[ring_size - 1].next_free = VSCSIIF_NONE; } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; int i; for (i = 0; i < info->ring_size; i++) info->ring_ref[i] = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_pages(GFP_KERNEL, get_order(VSCSIIF_MAX_RING_PAGE_SIZE)); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); for (i = 0; i < info->ring_size; i++) { err = xenbus_grant_ring(dev, virt_to_mfn((unsigned long)sring + i * PAGE_SIZE)); if (err < 0) { info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref[i] = err; } err = bind_listening_port_to_irqhandler( dev->otherend_id, scsifront_intr, 0, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; unsigned int ring_size, ring_order; const char *what = NULL; int err; int i; char buf[16]; DPRINTK("%s\n",__FUNCTION__); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = VSCSIIF_MAX_RING_ORDER; if (ring_order > VSCSIIF_MAX_RING_ORDER) ring_order = VSCSIIF_MAX_RING_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_scsiback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. 
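 * (Illustrative note, not from the original source: ring_order is taken
 * from the backend's "max-ring-page-order" xenstore key and clamped to
 * VSCSIIF_MAX_RING_ORDER, so ring_size = 1 << ring_order shared pages are
 * granted.  For example, a backend advertising max-ring-page-order = 2
 * yields a 4-page ring, and FRONT_RING_INIT() below is given
 * 4 * PAGE_SIZE bytes of ring space.)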
*/ err = scsifront_alloc_ring(info); if (err) return err; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } /* * for compatibility with old backend * ring order = 0 write the same xenstore * key with ring order > 0 */ what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto fail; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto fail; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref-%d", i + 1); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_ref[i]); if (err) goto fail; } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto fail; /* for persistent grant */ what = "feature-persistent"; err = xenbus_printf(xbt, dev->nodename, what, "%u", 1); if (err) goto fail; err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); //offsetof(struct vscsifrnt_info, //active.segs[max_nr_segs])); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); dev_set_drvdata(&dev->dev, info); info->dev = dev; init_waitqueue_head(&info->wq); init_waitqueue_head(&info->wq_sync); spin_lock_init(&info->shadow_lock); info->persistent_gnts_c = 0; info->pause = 0; info->callers = 0; info->waiting_pause = 0; init_waitqueue_head(&info->wq_pause); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; dev_err(&dev->dev, "kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; err = scsi_add_host(host, &dev->dev); if (err) { dev_err(&dev->dev, "fail to add scsi host %d\n", err); goto free_sring; } return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_resume(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host *host = info->host; int err; spin_lock_irq(host->host_lock); /* finish all still pending commands */ scsifront_finish_all(info); spin_unlock_irq(host->host_lock); /* reconnect to dom0 */ scsifront_resume_free(info); err = scsifront_init_ring(info); if (err) { dev_err(&dev->dev, "fail to resume %d\n", err); scsifront_free(info); return err; } shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; xenbus_switch_state(dev, XenbusStateInitialised); return 0; } static int scsifront_suspend(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host 
*host = info->host; int err = 0; /* no new commands for the backend */ spin_lock_irq(host->host_lock); info->pause = 1; while (info->callers && !err) { info->waiting_pause = 1; info->waiting_sync = 0; spin_unlock_irq(host->host_lock); wake_up(&info->wq_sync); err = wait_event_interruptible(info->wq_pause, !info->waiting_pause); spin_lock_irq(host->host_lock); } spin_unlock_irq(host->host_lock); return err; } static int scsifront_suspend_cancel(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); struct Scsi_Host *host = info->host; spin_lock_irq(host->host_lock); info->pause = 0; spin_unlock_irq(host->host_lock); return 0; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. */ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 #define VSCSIFRONT_OP_READD_LUN 3 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { dev_err(&dev->dev, "Device already in use.\n"); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } else { scsi_add_device(info->host, chn, tgt, lun); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; case VSCSIFRONT_OP_READD_LUN: if (device_state == XenbusStateConnected) xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); break; default: break; } } kfree(dir); return; } static void scsifront_setup_persistent(struct vscsifrnt_info *info) { int err; int persistent; err = xenbus_gather(XBT_NIL, info->dev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; } static void 
scsifront_setup_host(struct vscsifrnt_info *info) { int nr_segs; struct Scsi_Host *host = info->host; if (info->max_indirect_segments) nr_segs = info->max_indirect_segments; else nr_segs = VSCSIIF_SG_TABLESIZE; if (!info->pause && nr_segs > host->sg_tablesize) { host->sg_tablesize = nr_segs; dev_info(&info->dev->dev, "using up to %d SG entries\n", host->sg_tablesize); host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; } else if (info->pause && nr_segs < host->sg_tablesize) dev_warn(&info->dev->dev, "SG entries decreased from %d to %u - device may not work properly anymore\n", host->sg_tablesize, nr_segs); } static int scsifront_setup_indirect(struct vscsifrnt_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = VSCSIIF_SG_TABLESIZE; err = xenbus_gather(XBT_NIL, info->dev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (err != 1) segs = VSCSIIF_SG_TABLESIZE; if (!err && indirect_segments > VSCSIIF_SG_TABLESIZE) { info->max_indirect_segments = min(indirect_segments, MAX_VSCSI_INDIRECT_SEGMENTS); segs = info->max_indirect_segments; } scsifront_setup_host(info); printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); err = fill_grant_buffer(info, (segs + VSCSI_INDIRECT_PAGES(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = VSCSI_INDIRECT_PAGES(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * VSCSI_INDIRECT_PAGES(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; } return 0; out_of_memory: for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); int err; DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: break; case XenbusStateInitWait: if (dev->state == XenbusStateInitialised) break; err = scsifront_init_ring(info); if (err) { scsi_host_put(info->host); xenbus_dev_fatal(info->dev, err, "scsi init ring at %s", info->dev->otherend); return; } shadow_init(info->shadow, RING_SIZE(&info->ring)); xenbus_switch_state(dev, XenbusStateInitialised); break; case XenbusStateInitialised: break; case XenbusStateConnected: if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { 
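/* Illustrative note (not in the original source): on the first transition
 * to Connected the backend's feature nodes are expected to be present in
 * xenstore, e.g. ".../feature-persistent" and
 * ".../feature-max-indirect-segments"; the two setup calls below read
 * exactly those keys via xenbus_gather() and size the grant and
 * indirect-page pools accordingly. */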
scsifront_setup_persistent(info); err = scsifront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->dev, err, "setup_indirect at %s", info->dev->otherend); return; } } if (info->pause) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN); xenbus_switch_state(dev, XenbusStateConnected); info->pause = 0; return; } if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static const struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; MODULE_ALIAS("xen:vscsi"); static DEFINE_XENBUS_DRIVER(scsifront, , .probe = scsifront_probe, .remove = scsifront_remove, .resume = scsifront_resume, .suspend = scsifront_suspend, .suspend_cancel = scsifront_suspend_cancel, .otherend_changed = scsifront_backend_changed, ); int __init scsifront_xenbus_init(void) { if (max_nr_segs > SG_ALL) max_nr_segs = SG_ALL; if (max_nr_segs < VSCSIIF_SG_TABLESIZE) max_nr_segs = VSCSIIF_SG_TABLESIZE; return xenbus_register_frontend(&scsifront_driver); } void __exit scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/000077500000000000000000000000001314037446600255445ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.28/000077500000000000000000000000001314037446600263575ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.28/blkfront.c000066400000000000000000001576671314037446600303720ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 #define UNPLUG_DISK_TMOUT 60 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static void blkif_completion(struct blk_shadow *, struct blkfront_info *info, struct blkif_response *bret); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); static int blkfront_setup_indirect(struct blkfront_info *info); static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while (i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. 
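 * (Worked figure, added for illustration: (256 + 1) * 256 = 65,792 pages,
 * and 65,792 * 4 KiB = 257 MiB exactly, assuming 256 indirect segments per
 * request, one extra page per request for the indirect descriptor frame,
 * and a 256-entry ring.)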
*/ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); info->xbdev = dev; info->vdevice = vdevice; info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. 
*/ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; int i; int err; //printk("[INTO]----->blkfront_resume: %s\n", dev->nodename); for (i=0; iring_size; i++) { if (gnttab_query_foreign_access(info->ring_refs[i])) return 0; } blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; //printk("[EXIT]<-----blkfront_resume: %s\n", dev->nodename); err = talk_to_backend(dev, info); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U<ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. 
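 * (Illustrative note, not part of the original comment: the transaction
 * below keeps compatibility with single-page backends.  When
 * ring_size == 1 only the legacy "ring-ref" key is written; otherwise the
 * frontend writes "ring-page-order", "num-ring-pages" and one
 * "ring-ref<N>" key per page, e.g. ring_order = 2 gives four pages
 * published as ring-ref0..ring-ref3.)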
*/ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_printf(xbt, dev->nodename, what, "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: printk("talk_to_backend:blkif_recover old_ring_size =%d.ring_size=%d\n", old_ring_size,ring_size); err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { blkif_sring_t *sring; int err; unsigned int nr; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? 
vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, 0, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static int xenwatch_unplugdisk_callback(void *data) { int len; char *devname; int iTimeout = 0; char *bestate; struct block_device *bd; struct xenbus_device *dev = data; struct blkfront_info *info = dev_get_drvdata(&dev->dev); if(!strcmp(info->gd->disk_name, "xvda")) { printk(KERN_ERR "xvda disk is unallowed to unplug!\n"); return 0; } devname = xenbus_read(XBT_NIL, dev->otherend, "dev", &len); if (IS_ERR(devname)){ printk(KERN_ERR "read %s xenstore error!\n", dev->otherend); return 0; } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0){ if(iTimeout > UNPLUG_DISK_TMOUT) break; printk(KERN_INFO "info->users=%d,ssleep(1),iTimeout=%d!\n", info->users, iTimeout); ssleep(1); iTimeout++; } if(info && !info->users) { printk(KERN_INFO "finish to umount,info->users has changed to 0!\n"); } /* if (!info->gd) { printk(KERN_ERR "unplug_disk, info->gd is NULL\n"); xenbus_frontend_closed_uvp(dev); return 0; } */ bd = bdget_disk(info->gd, 0); if (bd == NULL) xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (info->users > 0) xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); bestate = xenbus_read(XBT_NIL, dev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", dev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { kfree(bestate); return 0; } kfree(bestate); } return 0; } /** * Callback received when the backend's state changes. 
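 * (Summary added for clarity, not in the original comment: InitWait
 * triggers talk_to_backend() to set up the shared ring, Connected calls
 * connect() to read disk geometry and features, and Closing/Closed hand
 * the device to the xenwatch_unplugdisk kthread so a busy disk can be
 * detached gracefully.)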
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); //struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: kthread_run(xenwatch_unplugdisk_callback, dev, "xenwatch_unplugdisk"); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { int err; char *type; unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); if (IS_ERR(type)) return; info->feature_secdiscard = 0; if (strncmp(type, "phy", 3) == 0) { err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL); if (!err) { info->feature_discard = 1; info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure); if (err == 1) info->feature_secdiscard = discard_secure; } else if (strncmp(type, "file", 4) == 0) info->feature_discard = 1; kfree(type); } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err, barrier, flush, discard; int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%lu", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: /* * If we are recovering from suspension, we need to wait * for the backend to announce it's features before * reconnecting, at least we need to know if the backend * supports indirect descriptors, and how many. */ //printk("[connect]:blkif_recover info->ring_size =%d.\n", info->ring_size); //blkif_recover(info, info->ring_size, info->ring_size); return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. 
* * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); xlvbd_sysfs_delif(info); ssleep(2); while(RING_FREE_REQUESTS(&info->ring) != RING_SIZE(&info->ring)) { ssleep(1); } spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_start_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? 
bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); /*info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL); if (info->sg == NULL) goto out_of_memory; sg_init_table(info->sg, segs);*/ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring)); if (err) goto out_of_memory; if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < num; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } return 0; out_of_memory: //kfree(info->sg); //info->sg = NULL; for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } //int is_recovered = 0; static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* 
debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.u.rw.id != id) return -EINVAL; if (!info->shadow[id].request) return -ENXIO; info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->u.rw.id = GET_ID_FROM_FREELIST(info); info->shadow[req->u.rw.id] = ent->copy; info->shadow[req->u.rw.id].req.u.rw.id = req->u.rw.id; /* Rewrite any grant references invalidated by susp/resume. */ for (i = 0; i < req->u.rw.nr_segments; i++) { gnttab_grant_foreign_access_ref(req->u.rw.seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.grants_used[i]->pfn), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. 
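 * (Clarifying note, not from the original source: calling
 * do_blkif_request() directly here drains whatever the block layer queued
 * while the ring was full, or while suspended requests were being replayed
 * from info->resume_list above, instead of waiting for the next I/O
 * submission to kick the queue.)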
*/ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } if (!err) /* only count successful opens; info may be NULL when the xbdev is gone */ info->users++; return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #else int blkif_release(struct gendisk *disk, fmode_t mode) { #endif struct blkfront_info *info = disk->private_data; //struct xenbus_device *xbdev; struct block_device *bd = bdget_disk(disk, 0); info->users--; if (bd) bdput(bd); return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) \ && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0) \ || LINUX_VERSION_CODE < KERNEL_VERSION(3,0,18)) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. * * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; struct blkif_request_segment_aligned *segments = NULL; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ bool new_persistent_gnts; grant_ref_t gref_head; struct grant *gnt_list_entry = NULL; struct scatterlist *sg; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; ring_req->u.rw.nr_segments = nseg; } if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { /* id, sector_number and handle are set above. */ ring_req->operation = BLKIF_OP_DISCARD; ring_req->u.discard.flag = 0; ring_req->u.discard.nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) ring_req->u.discard.flag = BLKIF_DISCARD_SECURE; } else { for_each_sg(info->shadow[id].sg, sg, nseg, i) { fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn)); bvec_data = kmap_atomic(sg_page(sg)); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); kunmap_atomic(bvec_data); kunmap_atomic(shared_data); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } } if (segments) kunmap_atomic(segments); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
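The shadow entry mirrors the ring request, so blkif_recover() can replay it after a suspend/resume or backend restart. 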
*/ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); /*if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; }*/ if (req->cmd_type != REQ_TYPE_FS) { __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } ret = bret->status == BLKIF_RSP_OKAY ? 
0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } //kfree(info->sg); //info->sg = NULL; /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work_sync(&info->work); /* Free resources associated with old device channel. */ vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; if (s->req.operation == BLKIF_OP_DISCARD) return; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page. */ for_each_sg(s->sg, sg, nseg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn)); bvec_data = kmap_atomic(sg_page(sg)); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); kunmap_atomic(bvec_data); kunmap_atomic(shared_data); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. 
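* Ending foreign access below revokes the grant before the entry is parked at the tail of the list. 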
*/ gnttab_end_foreign_access(s->grants_used[i]->gref, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. * This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio, split_bio->err); kfree(split_bio); } bio_put(bio); } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct request *req, *n; unsigned int segs; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int offset; int pending, size; struct split_bio *split_bio; struct list_head requests; struct blk_resume_entry *ent = NULL; LIST_HEAD(list); //WARN_ON(1); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { /* Not in use? */ if (!info->shadow[i].request) continue; ent = kmalloc(sizeof(*ent), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. 
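The shadow array is wiped and re-linked for the new ring size; the saved requests keep their state in resume_list and are re-queued in stage 3. 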
*/ memset(&info->shadow, 0, sizeof(info->shadow)); shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { while (!list_empty(&info->resume_list)) { ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs); bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); merge_bio.head = ent->copy.request->bio; merge_bio.tail = ent->copy.request->biotail; bio_list_merge(&bio_list, &merge_bio); ent->copy.request->bio = NULL; //blk_put_request(ent->copy.request); blk_end_request_all(ent->copy.request, 0); __list_del_entry(&ent->list); kfree(ent); } /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. */ spin_lock_irq(&info->io_lock); while ((req = blk_fetch_request(info->rq)) != NULL) { merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) pr_alert("diskcache flush request found!\n"); //__blk_put_request(info->rq, req); __blk_end_request_all(req, 0); } spin_unlock_irq(&info->io_lock); (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ //kick_pending_request_queues(info); if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&info->io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. 
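* split_bio_end() drops the pending count and completes the parent bio once the count reaches zero. 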
*/ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_INFO "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.28/block.h000066400000000000000000000152261314037446600276300ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../include/xen/interface/blkif.h" //#include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #define DPRINTK_IOCTL(_f, _a...) 
((void)0) struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { blkif_request_t req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. */ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; //module_param_named(max_seg, xen_blkif_max_segments, int, S_IRUGO); //MODULE_PARM_DESC(max_seg, "Maximum amount of segments in indirect requests (default is 256 (1M))"); #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) #define SEGS_PER_INDIRECT_FRAME \ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) #define INDIRECT_GREFS(_segs) \ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; /* move sg to blk_shadow struct */ //struct scatterlist *sg; unsigned int irq; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int max_indirect_segments; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; /** * The number of people holding this device open. We won't allow a * hot-unplug unless this is 0. */ int users; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); extern int blkif_release(struct gendisk *disk, fmode_t mode); extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
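xlvbd_del() undoes this; it deletes the gendisk, then stops and drains the request queue before releasing it. 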
*/ int xlvbd_add(blkif_sector_t capacity, int device, u16 vdisk_info, u16 sector_size, struct blkfront_info *info); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.28/vbd.c000066400000000000000000000372111314037446600273020ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include #include /* ssleep prototype */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, struct blkfront_info *info) { struct request_queue *rq; unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_hw_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
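Pages are shared with the backend by grant reference, so any memory the block layer hands us is acceptable and bouncing would only add copies. 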
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } int xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info, u16 sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; unsigned long flags; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); spin_unlock_irqrestore(&info->io_lock, flags); while (!list_empty(&info->rq->queue_head)){ ssleep(1); } blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s; %s %s %s %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? 
"flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled", "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? "enabled;" : "disabled;"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.28/vcd.c000066400000000000000000000306721314037446600273070ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct 
cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #else static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; int ret = 0; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if 
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); 
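/* Unlink and free the entry while vcd_disks_lock is still held so concurrent lookups cannot find it. */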
list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.49/000077500000000000000000000000001314037446600263625ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.49/blkfront.c000066400000000000000000001275221314037446600303600ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; struct blkif_request_segment *indirect_segs; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *, unsigned int old_ring_size); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static bool blkif_completion(struct blkfront_info *, unsigned long id, int status); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); /* Maximum number of indirect segments to be used by the front end. 
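 * Defaults to BITS_PER_LONG and is further capped in talk_to_backend() by
 * whatever the backend advertises in feature-max-indirect-segments.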
*/ static unsigned int max_segs_per_req = BITS_PER_LONG; module_param_named(max_indirect_segments, max_segs_per_req, uint, 0644); MODULE_PARM_DESC(max_indirect_segments, "maximum number of indirect segments"); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); INIT_LIST_HEAD(&info->resume_split); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; return talk_to_backend(dev, info); } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.id = i; shadow[i - 1].req.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. 
*/ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order, max_segs; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ info->ring_size = ring_size = 1U << ring_order; err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-max-indirect-segments", "%u", &max_segs); if (max_segs > max_segs_per_req) max_segs = max_segs_per_req; if (err != 1 || max_segs < BLKIF_MAX_SEGMENTS_PER_REQUEST) max_segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; else if (BLKIF_INDIRECT_PAGES(max_segs) > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST) max_segs = BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * BLKIF_SEGS_PER_INDIRECT_FRAME; info->max_segs_per_req = max_segs; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info, old_ring_size); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_write(xbt, dev->nodename, what, XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr-ents=%u segs-per-req=%u\n", dev->nodename, info->ring_size, ring_size, max_segs); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int 
setup_blkring(struct xenbus_device *dev, struct blkfront_info *info, unsigned int old_ring_size) { blkif_sring_t *sring; int err; unsigned int i, nr, ring_size; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, nr * PAGE_SIZE); info->sg = kcalloc(info->max_segs_per_req, sizeof(*info->sg), GFP_KERNEL); if (!info->sg) { err = -ENOMEM; goto fail; } sg_init_table(info->sg, info->max_segs_per_req); err = -ENOMEM; ring_size = RING_SIZE(&info->ring); if (info->max_segs_per_req > BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (!info->indirect_segs) old_ring_size = 0; if (old_ring_size < ring_size) { struct blkif_request_segment **segs; segs = krealloc(info->indirect_segs, ring_size * sizeof(*segs), GFP_KERNEL|__GFP_ZERO); if (!segs) goto fail; info->indirect_segs = segs; } for (i = old_ring_size; i < ring_size; ++i) { info->indirect_segs[i] = vzalloc(BLKIF_INDIRECT_PAGES(info->max_segs_per_req) * PAGE_SIZE); if (!info->indirect_segs[i]) goto fail; } } for (i = 0; i < ring_size; ++i) { unsigned long *frame; if (info->shadow[i].frame && ksize(info->shadow[i].frame) / sizeof(*frame) >= info->max_segs_per_req) continue; frame = krealloc(info->shadow[i].frame, info->max_segs_per_req * sizeof(*frame), GFP_KERNEL); if (!frame) goto fail; if (!info->shadow[i].frame) memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, 0, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: mutex_lock(&info->mutex); if (dev->state == XenbusStateClosing) { mutex_unlock(&info->mutex); break; } bd = info->gd ? 
bdget_disk(info->gd, 0) : NULL; mutex_unlock(&info->mutex); if (bd == NULL) { xenbus_frontend_closed(dev); break; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (bd->bd_openers) { xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); xenbus_switch_state(dev, XenbusStateClosing); } else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; info->feature_discard = 1; if (!xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL)) { info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure) != 1) discard_secure = 0; info->feature_secdiscard = !!discard_secure; } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned int binfo, sector_size, physical_sector_size; int err, barrier, flush, discard; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%u", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%u", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } /* * physcial-sector-size is a newer field, so old backends may not * provide this. Assume physical sector size to be the same as * sector_size in that case. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "physical-sector-size", "%u", &physical_sector_size); if (err <= 0) physical_sector_size = sector_size; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; info->flush_op = 0; if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. 
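 * (That is, BLKIF_OP_FLUSH_DISKCACHE is preferred over
 * BLKIF_OP_WRITE_BARRIER when the backend offers both.)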
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, physical_sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); xlvbd_sysfs_delif(info); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.id != id) return -EINVAL; if (!info->shadow[id].request) return -ENXIO; info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", [BLKIF_OP_INDIRECT] = "indirect", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void split_request(struct blkif_request *req, struct blk_shadow *copy, const struct blkfront_info *info, const struct blkif_request_segment *segs) { unsigned int i; req->operation = copy->ind.indirect_op; req->nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; req->handle = copy->ind.handle; for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; ++i) { req->seg[i] = segs[i]; gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(copy->frame[i]), rq_data_dir(copy->request) ? GTF_readonly : 0); info->shadow[req->id].frame[i] = copy->frame[i]; } copy->ind.id = req->id; } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned long *frame; unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); frame = info->shadow[req->id].frame; info->shadow[req->id] = ent->copy; info->shadow[req->id].req.id = req->id; info->shadow[req->id].frame = frame; /* Rewrite any grant references invalidated by susp/resume. 
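 * Every data segment, and for indirect requests also the pages holding the
 * segment arrays, is granted to the backend again before the copied request
 * is re-issued on the new ring.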
*/ if (req->operation == BLKIF_OP_INDIRECT) { const blkif_request_indirect_t *ind = (void *)req; struct blkif_request_segment *segs = info->indirect_segs[req->id]; for (i = RING_SIZE(&info->ring); segs && i--; ) if (!info->indirect_segs[i]) { info->indirect_segs[i] = segs; segs = NULL; } if (segs) vfree(segs); segs = ent->indirect_segs; info->indirect_segs[req->id] = segs; if (ind->nr_segments > info->max_segs_per_req) { split_request(req, &ent->copy, info, segs); info->ring.req_prod_pvt++; queued = true; list_move_tail(&ent->list, &info->resume_split); continue; } for (i = 0; i < BLKIF_INDIRECT_PAGES(ind->nr_segments); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); gnttab_grant_foreign_access_ref( ind->indirect_grefs[i], info->xbdev->otherend_id, page_to_phys(pg) >> PAGE_SHIFT, GTF_readonly); } for (i = 0; i < ind->nr_segments; ++i) { gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } } else for (i = 0; i < req->nr_segments; ++i) { gnttab_grant_foreign_access_ref(req->seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (list_empty(&info->resume_split) && list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) int blkif_release(struct gendisk *disk, fmode_t mode) { #else void blkif_release(struct gendisk *disk, fmode_t mode) { #define return(n) return #endif struct blkfront_info *info = disk->private_data; struct xenbus_device *xbdev; struct block_device *bd = bdget_disk(disk, 0); bdput(bd); if (bd->bd_openers) return(0); /* * Check if we have been instructed to close. We will have * deferred this request, because the bdev was still open. 
*/ mutex_lock(&info->mutex); xbdev = info->xbdev; if (xbdev && xbdev->state == XenbusStateClosing) { /* pending switch to state closed */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); } mutex_unlock(&info->mutex); if (!xbdev) { /* sudden device removal */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); disk->private_data = NULL; kfree(info); } return(0); #undef return } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. 
* * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect, nr_segs; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; nr_segs = info->max_segs_per_req; if (nr_segs > BLKIF_MAX_SEGMENTS_PER_REQUEST) nr_segs += BLKIF_INDIRECT_PAGES(nr_segs); if (gnttab_alloc_grant_references(nr_segs, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, nr_segs); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) ring_req->operation = info->flush_op; #else if (req->cmd_flags & REQ_HARDBARRIER) ring_req->operation = BLKIF_OP_WRITE_BARRIER; #endif if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { struct blkif_request_discard *discard = (void *)ring_req; /* id, sector_number and handle are set above. */ discard->operation = BLKIF_OP_DISCARD; discard->flag = 0; discard->handle = info->handle; discard->nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) discard->flag = BLKIF_DISCARD_SECURE; } else { struct blkif_request_segment *segs; nr_segs = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(nr_segs > info->max_segs_per_req); if (nr_segs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ring_req->nr_segments = nr_segs; ring_req->handle = info->handle; segs = ring_req->seg; } else { struct blkif_request_indirect *ind = (void *)ring_req; ind->indirect_op = ring_req->operation; ind->operation = BLKIF_OP_INDIRECT; ind->nr_segments = nr_segs; ind->handle = info->handle; segs = info->indirect_segs[id]; for (i = 0; i < BLKIF_INDIRECT_PAGES(nr_segs); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); buffer_mfn = page_to_phys(pg) >> PAGE_SHIFT; ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, GTF_readonly); ind->indirect_grefs[i] = ref; } } for_each_sg(info->sg, sg, nr_segs, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); segs[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
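 * blkif_recover() rebuilds its resume list from these shadow copies after a
 * suspend/resume or backend restart.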
*/ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; } DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bool done; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; done = blkif_completion(info, id, bret->status); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } if (!done) continue; ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && !(info->shadow[id].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[id].ind.nr_segments : info->shadow[id].req.nr_segments))) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); /* Free resources associated with old device channel. 
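 * The indirect segment pages and per-shadow frame arrays are released only
 * when not suspending; across a suspend they are kept so that blkif_recover()
 * can re-queue the in-flight requests.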
*/ if (!suspend) { unsigned int i; if (info->indirect_segs) { for (i = 0; i < RING_SIZE(&info->ring); ++i) if (info->indirect_segs[i]) vfree(info->indirect_segs[i]); kfree(info->indirect_segs); info->indirect_segs = NULL; } for (i = 0; i < ARRAY_SIZE(info->shadow); ++i) kfree(info->shadow[i].frame); } vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; kfree(info->sg); info->sg = NULL; info->max_segs_per_req = 0; } static bool blkif_completion(struct blkfront_info *info, unsigned long id, int status) { struct blk_shadow *s = &info->shadow[id]; switch (s->req.operation) { unsigned int i, j; case BLKIF_OP_DISCARD: break; case BLKIF_OP_INDIRECT: { struct blkif_request_segment *segs = info->indirect_segs[id]; struct blk_resume_entry *tmp, *ent = NULL; list_for_each_entry(tmp, &info->resume_split, list) if (tmp->copy.ind.id == id) { ent = tmp; __list_del_entry(&ent->list); break; } if (!ent) { for (i = 0; i < s->ind.nr_segments; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); gnttab_multi_end_foreign_access( BLKIF_INDIRECT_PAGES(s->ind.nr_segments), s->ind.indirect_grefs, NULL); break; } for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); if (status != BLKIF_RSP_OKAY) { kfree(ent); break; } for (j = 0; i < ent->copy.ind.nr_segments; ++i, ++j) segs[j] = segs[i]; ent->copy.frame += BLKIF_MAX_SEGMENTS_PER_REQUEST; ent->copy.ind.nr_segments -= BLKIF_MAX_SEGMENTS_PER_REQUEST; if (ent->copy.ind.nr_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ent->copy.req.operation = ent->copy.ind.indirect_op; ent->copy.req.nr_segments = ent->copy.ind.nr_segments; ent->copy.req.handle = ent->copy.ind.handle; for (i = 0; i < ent->copy.req.nr_segments; ++i) ent->copy.req.seg[i] = segs[i]; } list_add_tail(&ent->list, &info->resume_list); return false; } default: for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); break; } return true; } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct blk_resume_entry *ent; LIST_HEAD(list); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { unsigned int nr_segs; /* Not in use? */ if (!info->shadow[i].request) continue; switch (info->shadow[i].req.operation) { default: nr_segs = info->shadow[i].req.nr_segments; break; case BLKIF_OP_INDIRECT: nr_segs = info->shadow[i].ind.nr_segments; break; case BLKIF_OP_DISCARD: nr_segs = 0; break; } ent = kmalloc(sizeof(*ent) + nr_segs * sizeof(*ent->copy.frame), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; ent->copy.frame = (void *)(ent + 1); memcpy(ent->copy.frame, info->shadow[i].frame, nr_segs * sizeof(*ent->copy.frame)); if (ent->copy.req.operation == BLKIF_OP_INDIRECT) { ent->indirect_segs = info->indirect_segs[i]; info->indirect_segs[i] = NULL; } else ent->indirect_segs = NULL; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. 
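 * Shadow entries are wiped (their frame arrays are kept) and re-linked into
 * the id free list; stage 3, re-queuing the copied requests, happens in
 * kick_pending_request_queues().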
*/ for (i = 0; i < old_ring_size; ++i) { unsigned long *frame = info->shadow[i].frame; memset(info->shadow + i, 0, sizeof(*info->shadow)); memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); if (info->indirect_segs) for (i = ring_size; i < old_ring_size; ++i) if (info->indirect_segs[i]) { vfree(info->indirect_segs[i]); info->indirect_segs[i] = NULL; } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.49/block.h000066400000000000000000000143201314037446600276250ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct blk_shadow { union { blkif_request_t req; blkif_request_indirect_t ind; }; struct request *request; unsigned long *frame; }; #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; struct scatterlist *sg; struct blkif_request_segment **indirect_segs; unsigned int irq; unsigned int max_segs_per_req; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list, resume_split; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) extern int blkif_release(struct gendisk *disk, fmode_t mode); #else extern void blkif_release(struct gendisk *disk, fmode_t mode); #endif extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
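 * (blkfront does this in connect() once the backend has reported the device
 * details and the ring is live.)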
*/ int xlvbd_add(blkif_sector_t capacity, int device, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ #ifdef CONFIG_XEN extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #else //static inline void register_vcd(struct blkfront_info *info) {} //static inline void unregister_vcd(struct blkfront_info *info) {} #endif extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.49/vbd.c000066400000000000000000000400301314037446600272760ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_physical_block_size(rq, physical_sector_size); blk_queue_max_hw_sectors(rq, info->max_segs_per_req << (PAGE_SHIFT - 9)); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, info->max_segs_per_req); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
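 * Data pages are granted to the backend directly, so any address the block
 * layer hands us can be used as-is.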
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } static inline bool xlvbd_ide_unplugged(int major) { #ifndef CONFIG_XEN /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=ide-disks|all * - native driver required with xen_emul_unplug=nics|never */ if (xen_pvonhvm_unplugged_disks) return true; switch (major) { case IDE0_MAJOR: case IDE1_MAJOR: case IDE2_MAJOR: case IDE3_MAJOR: case IDE4_MAJOR: case IDE5_MAJOR: case IDE6_MAJOR: case IDE7_MAJOR: case IDE8_MAJOR: case IDE9_MAJOR: return false; default: break; } #endif return true; } int xlvbd_add(blkif_sector_t capacity, int vdevice, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } if (!xlvbd_ide_unplugged(major)) { pr_warning("blkfront: skipping IDE major on %x, native driver required\n", vdevice); return -ENODEV; } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 
1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? "flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/3.12.49/vcd.c000066400000000000000000000323011314037446600273010ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); 
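/* Hand the page to the backend as a synchronous packet request; on return
 * it has filled in ret/err, media_present and sectors for this device. */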
submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #else static void xencdrom_block_release(struct gendisk *gd, 
fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int ret = 0; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) # define ret 0 #else # define ret #endif if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; #undef ret } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? 
cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/Kbuild000066400000000000000000000001231314037446600266750ustar00rootroot00000000000000include $(M)/config.mk obj-m += xen-vbd.o xen-vbd-objs := blkfront.o vbd.o vcd.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/Makefile000066400000000000000000000000661314037446600272060ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/blkfront.c000066400000000000000000001277161314037446600275470ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. 
Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; struct blkif_request_segment *indirect_segs; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *, unsigned int old_ring_size); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static bool blkif_completion(struct blkfront_info *, unsigned long id, int status); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); /* Maximum number of indirect segments advertised to the front end. */ static unsigned int max_segs_per_req = BITS_PER_LONG; module_param_named(max_indirect_segments, max_segs_per_req, uint, 0644); MODULE_PARM_DESC(max_indirect_segments, "maximum number of indirect segments"); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. 
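 * If the backend is not yet in InitWait, the handshake is deferred to
 * backend_changed().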
*/ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); INIT_LIST_HEAD(&info->resume_split); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; return talk_to_backend(dev, info); } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.id = i; shadow[i - 1].req.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. 
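 * Negotiates the ring size (multi-page rings) and the indirect-segment limit
 * with the backend, then writes the ring reference(s), event channel and
 * protocol into xenstore.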
*/ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order, max_segs; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ info->ring_size = ring_size = 1U << ring_order; err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-max-indirect-segments", "%u", &max_segs); if (max_segs > max_segs_per_req) max_segs = max_segs_per_req; if (err != 1 || max_segs < BLKIF_MAX_SEGMENTS_PER_REQUEST) max_segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; else if (BLKIF_INDIRECT_PAGES(max_segs) > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST) max_segs = BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * BLKIF_SEGS_PER_INDIRECT_FRAME; info->max_segs_per_req = max_segs; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info, old_ring_size); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_write(xbt, dev->nodename, what, XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr-ents=%u segs-per-req=%u\n", dev->nodename, info->ring_size, ring_size, max_segs); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int 
setup_blkring(struct xenbus_device *dev, struct blkfront_info *info, unsigned int old_ring_size) { blkif_sring_t *sring; int err; unsigned int i, nr, ring_size; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, nr * PAGE_SIZE); info->sg = kcalloc(info->max_segs_per_req, sizeof(*info->sg), GFP_KERNEL); if (!info->sg) { err = -ENOMEM; goto fail; } sg_init_table(info->sg, info->max_segs_per_req); err = -ENOMEM; ring_size = RING_SIZE(&info->ring); if (info->max_segs_per_req > BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (!info->indirect_segs) old_ring_size = 0; if (old_ring_size < ring_size) { struct blkif_request_segment **segs; segs = krealloc(info->indirect_segs, ring_size * sizeof(*segs), GFP_KERNEL|__GFP_ZERO); if (!segs) goto fail; info->indirect_segs = segs; } for (i = old_ring_size; i < ring_size; ++i) { info->indirect_segs[i] = vzalloc(BLKIF_INDIRECT_PAGES(info->max_segs_per_req) * PAGE_SIZE); if (!info->indirect_segs[i]) goto fail; } } for (i = 0; i < ring_size; ++i) { unsigned long *frame; if (info->shadow[i].frame && ksize(info->shadow[i].frame) / sizeof(*frame) >= info->max_segs_per_req) continue; frame = krealloc(info->shadow[i].frame, info->max_segs_per_req * sizeof(*frame), GFP_KERNEL); if (!frame) goto fail; if (!info->shadow[i].frame) memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, 0, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: mutex_lock(&info->mutex); if (dev->state == XenbusStateClosing) { mutex_unlock(&info->mutex); break; } bd = info->gd ? 
bdget_disk(info->gd, 0) : NULL; mutex_unlock(&info->mutex); if (bd == NULL) { xenbus_frontend_closed(dev); break; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (bd->bd_openers) { xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); xenbus_switch_state(dev, XenbusStateClosing); } else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; info->feature_discard = 1; if (!xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL)) { info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure) != 1) discard_secure = 0; info->feature_secdiscard = !!discard_secure; } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned int binfo, sector_size, physical_sector_size; int err, barrier, flush, discard; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%u", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%u", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } /* * physcial-sector-size is a newer field, so old backends may not * provide this. Assume physical sector size to be the same as * sector_size in that case. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "physical-sector-size", "%u", &physical_sector_size); if (err <= 0) physical_sector_size = sector_size; info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. 
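 * (BLKIF_OP_FLUSH_DISKCACHE with REQ_FLUSH only, i.e. without REQ_FUA).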
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, physical_sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); xlvbd_sysfs_delif(info); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. 
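 * blkif_release() will then call blkfront_closing() and free the info
 * structure on the final close.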
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.id != id) return -EINVAL; if (!info->shadow[id].request) return -ENXIO; info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", [BLKIF_OP_INDIRECT] = "indirect", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void split_request(struct blkif_request *req, struct blk_shadow *copy, const struct blkfront_info *info, const struct blkif_request_segment *segs) { unsigned int i; req->operation = copy->ind.indirect_op; req->nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; req->handle = copy->ind.handle; for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; ++i) { req->seg[i] = segs[i]; gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(copy->frame[i]), rq_data_dir(copy->request) ? GTF_readonly : 0); info->shadow[req->id].frame[i] = copy->frame[i]; } copy->ind.id = req->id; } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned long *frame; unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); frame = info->shadow[req->id].frame; info->shadow[req->id] = ent->copy; info->shadow[req->id].req.id = req->id; info->shadow[req->id].frame = frame; /* Rewrite any grant references invalidated by susp/resume. 
*/ if (req->operation == BLKIF_OP_INDIRECT) { const blkif_request_indirect_t *ind = (void *)req; struct blkif_request_segment *segs = info->indirect_segs[req->id]; for (i = RING_SIZE(&info->ring); segs && i--; ) if (!info->indirect_segs[i]) { info->indirect_segs[i] = segs; segs = NULL; } if (segs) vfree(segs); segs = ent->indirect_segs; info->indirect_segs[req->id] = segs; if (ind->nr_segments > info->max_segs_per_req) { split_request(req, &ent->copy, info, segs); info->ring.req_prod_pvt++; queued = true; list_move_tail(&ent->list, &info->resume_split); continue; } for (i = 0; i < BLKIF_INDIRECT_PAGES(ind->nr_segments); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); gnttab_grant_foreign_access_ref( ind->indirect_grefs[i], info->xbdev->otherend_id, page_to_phys(pg) >> PAGE_SHIFT, GTF_readonly); } for (i = 0; i < ind->nr_segments; ++i) { gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } } else for (i = 0; i < req->nr_segments; ++i) { gnttab_grant_foreign_access_ref(req->seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (list_empty(&info->resume_split) && !RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) int blkif_release(struct gendisk *disk, fmode_t mode) { #else void blkif_release(struct gendisk *disk, fmode_t mode) { #define return(n) return #endif struct blkfront_info *info = disk->private_data; struct xenbus_device *xbdev; struct block_device *bd = bdget_disk(disk, 0); bdput(bd); if (bd->bd_openers) return(0); /* * Check if we have been instructed to close. We will have * deferred this request, because the bdev was still open. 
*/ mutex_lock(&info->mutex); xbdev = info->xbdev; if (xbdev && xbdev->state == XenbusStateClosing) { /* pending switch to state closed */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); } mutex_unlock(&info->mutex); if (!xbdev) { /* sudden device removal */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); disk->private_data = NULL; kfree(info); } return(0); #undef return } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. 
* * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect, nr_segs; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; nr_segs = info->max_segs_per_req; if (nr_segs > BLKIF_MAX_SEGMENTS_PER_REQUEST) nr_segs += BLKIF_INDIRECT_PAGES(nr_segs); if (gnttab_alloc_grant_references(nr_segs, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, nr_segs); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { struct blkif_request_discard *discard = (void *)ring_req; /* id, sector_number and handle are set above. */ discard->operation = BLKIF_OP_DISCARD; discard->flag = 0; discard->handle = info->handle; discard->nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) discard->flag = BLKIF_DISCARD_SECURE; } else { struct blkif_request_segment *segs; nr_segs = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(nr_segs > info->max_segs_per_req); if (nr_segs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ring_req->nr_segments = nr_segs; ring_req->handle = info->handle; segs = ring_req->seg; } else { struct blkif_request_indirect *ind = (void *)ring_req; ind->indirect_op = ring_req->operation; ind->operation = BLKIF_OP_INDIRECT; ind->nr_segments = nr_segs; ind->handle = info->handle; segs = info->indirect_segs[id]; for (i = 0; i < BLKIF_INDIRECT_PAGES(nr_segs); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); buffer_mfn = page_to_phys(pg) >> PAGE_SHIFT; ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, GTF_readonly); ind->indirect_grefs[i] = ref; } } for_each_sg(info->sg, sg, nr_segs, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); segs[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
*/ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; } #ifndef OPENSUSE_1302 DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); #else DPRINTK("do_blk_req %p: cmd %p, sec %llx, (%u/%u) [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), rq_data_dir(req) ? "write" : "read"); #endif if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bool done; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; done = blkif_completion(info, id, bret->status); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } if (!done) continue; ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && !(info->shadow[id].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[id].ind.nr_segments : info->shadow[id].req.nr_segments))) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); /* Free resources associated with old device channel. 
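 * On suspend the shadow entries and indirect-segment pages are kept so that
 * blkif_recover() can re-queue the in-flight requests.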
*/ if (!suspend) { unsigned int i; if (info->indirect_segs) { for (i = 0; i < RING_SIZE(&info->ring); ++i) if (info->indirect_segs[i]) vfree(info->indirect_segs[i]); kfree(info->indirect_segs); info->indirect_segs = NULL; } for (i = 0; i < ARRAY_SIZE(info->shadow); ++i) kfree(info->shadow[i].frame); } vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; kfree(info->sg); info->sg = NULL; info->max_segs_per_req = 0; } static bool blkif_completion(struct blkfront_info *info, unsigned long id, int status) { struct blk_shadow *s = &info->shadow[id]; switch (s->req.operation) { unsigned int i, j; case BLKIF_OP_DISCARD: break; case BLKIF_OP_INDIRECT: { struct blkif_request_segment *segs = info->indirect_segs[id]; struct blk_resume_entry *tmp, *ent = NULL; list_for_each_entry(tmp, &info->resume_split, list) if (tmp->copy.ind.id == id) { ent = tmp; __list_del_entry(&ent->list); break; } if (!ent) { for (i = 0; i < s->ind.nr_segments; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); gnttab_multi_end_foreign_access( BLKIF_INDIRECT_PAGES(s->ind.nr_segments), s->ind.indirect_grefs, NULL); break; } for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); if (status != BLKIF_RSP_OKAY) { kfree(ent); break; } for (j = 0; i < ent->copy.ind.nr_segments; ++i, ++j) segs[j] = segs[i]; ent->copy.frame += BLKIF_MAX_SEGMENTS_PER_REQUEST; ent->copy.ind.nr_segments -= BLKIF_MAX_SEGMENTS_PER_REQUEST; if (ent->copy.ind.nr_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ent->copy.req.operation = ent->copy.ind.indirect_op; ent->copy.req.nr_segments = ent->copy.ind.nr_segments; ent->copy.req.handle = ent->copy.ind.handle; for (i = 0; i < ent->copy.req.nr_segments; ++i) ent->copy.req.seg[i] = segs[i]; } list_add_tail(&ent->list, &info->resume_list); return false; } default: for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); break; } return true; } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct blk_resume_entry *ent; LIST_HEAD(list); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { unsigned int nr_segs; /* Not in use? */ if (!info->shadow[i].request) continue; switch (info->shadow[i].req.operation) { default: nr_segs = info->shadow[i].req.nr_segments; break; case BLKIF_OP_INDIRECT: nr_segs = info->shadow[i].ind.nr_segments; break; case BLKIF_OP_DISCARD: nr_segs = 0; break; } ent = kmalloc(sizeof(*ent) + nr_segs * sizeof(*ent->copy.frame), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; ent->copy.frame = (void *)(ent + 1); memcpy(ent->copy.frame, info->shadow[i].frame, nr_segs * sizeof(*ent->copy.frame)); if (info->indirect_segs) { ent->indirect_segs = info->indirect_segs[i]; info->indirect_segs[i] = NULL; } else ent->indirect_segs = NULL; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. 
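 * Each shadow entry keeps its (reset) frame array; the id free list is then
 * rebuilt by shadow_init().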
*/ for (i = 0; i < old_ring_size; ++i) { unsigned long *frame = info->shadow[i].frame; memset(info->shadow + i, 0, sizeof(*info->shadow)); memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); if (info->indirect_segs) for (i = ring_size; i < old_ring_size; ++i) if (info->indirect_segs[i]) { vfree(info->indirect_segs[i]); info->indirect_segs[i] = NULL; } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/block.h000066400000000000000000000140241314037446600270100ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #if 0 #define DPRINTK_IOCTL(_f, _a...) pr_alert(_f, ## _a) #else #define DPRINTK_IOCTL(_f, _a...) ((void)0) #endif struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct blk_shadow { union { blkif_request_t req; blkif_request_indirect_t ind; }; struct request *request; unsigned long *frame; }; #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; struct scatterlist *sg; struct blkif_request_segment **indirect_segs; unsigned int irq; unsigned int max_segs_per_req; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list, resume_split; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) extern int blkif_release(struct gendisk *disk, fmode_t mode); #else extern void blkif_release(struct gendisk *disk, fmode_t mode); #endif extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
*/ int xlvbd_add(blkif_sector_t capacity, int device, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ #ifdef CONFIG_XEN extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #else //static inline void register_vcd(struct blkfront_info *info) {} //static inline void unregister_vcd(struct blkfront_info *info) {} #endif extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/opensuse/000077500000000000000000000000001314037446600274055ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/opensuse/blkfront.c000066400000000000000000001277161314037446600314100ustar00rootroot00000000000000/****************************************************************************** * blkfront.c * * XenLinux virtual block-device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "block.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct blk_resume_entry { struct list_head list; struct blk_shadow copy; struct blkif_request_segment *indirect_segs; }; #define BLKIF_STATE_DISCONNECTED 0 #define BLKIF_STATE_CONNECTED 1 #define BLKIF_STATE_SUSPENDED 2 static void connect(struct blkfront_info *); static void blkfront_closing(struct blkfront_info *); static int blkfront_remove(struct xenbus_device *); static int talk_to_backend(struct xenbus_device *, struct blkfront_info *); static int setup_blkring(struct xenbus_device *, struct blkfront_info *, unsigned int old_ring_size); static void kick_pending_request_queues(struct blkfront_info *); static irqreturn_t blkif_int(int irq, void *dev_id); static void blkif_restart_queue(struct work_struct *arg); static int blkif_recover(struct blkfront_info *, unsigned int old_ring_size, unsigned int new_ring_size); static bool blkif_completion(struct blkfront_info *, unsigned long id, int status); static void blkif_free(struct blkfront_info *, int); static void write_frontend_state_flag(const char * nodename); /* Maximum number of indirect segments advertised to the front end. */ static unsigned int max_segs_per_req = BITS_PER_LONG; module_param_named(max_indirect_segments, max_segs_per_req, uint, 0644); MODULE_PARM_DESC(max_indirect_segments, "maximum number of indirect segments"); /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; #ifndef CONFIG_XEN /* For HVM guests, do not take over CDROM devices. */ char *type; type = xenbus_read(XBT_NIL, dev->nodename, "device-type", NULL); if (IS_ERR(type)) { xenbus_dev_fatal(dev, PTR_ERR(type), "reading dev type"); return PTR_ERR(type); } if (!strncmp(type, "cdrom", 5)) { /* * We are handed a cdrom device in a hvm guest; let the * native cdrom driver handle this device. */ kfree(type); pr_notice("blkfront: ignoring CDROM %s\n", dev->nodename); return -ENXIO; } kfree(type); #endif /* FIXME: Use dynamic device id if this is not set. */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } spin_lock_init(&info->io_lock); mutex_init(&info->mutex); info->xbdev = dev; info->vdevice = vdevice; info->connected = BLKIF_STATE_DISCONNECTED; INIT_WORK(&info->work, blkif_restart_queue); INIT_LIST_HEAD(&info->resume_list); INIT_LIST_HEAD(&info->resume_split); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. 
*/ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_backend(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); enum xenbus_state backend_state; DPRINTK("blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; return talk_to_backend(dev, info); } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.id = i; shadow[i - 1].req.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order, max_segs; unsigned int old_ring_size = RING_SIZE(&info->ring); const char *what = NULL; struct xenbus_transaction xbt; int err; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ? ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ info->ring_size = ring_size = 1U << ring_order; err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-max-indirect-segments", "%u", &max_segs); if (max_segs > max_segs_per_req) max_segs = max_segs_per_req; if (err != 1 || max_segs < BLKIF_MAX_SEGMENTS_PER_REQUEST) max_segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; else if (BLKIF_INDIRECT_PAGES(max_segs) > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST) max_segs = BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST * BLKIF_SEGS_PER_INDIRECT_FRAME; info->max_segs_per_req = max_segs; /* Create shared ring, alloc event channel. 
*/ err = setup_blkring(dev, info, old_ring_size); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) goto abort_transaction; } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", irq_to_evtchn_port(info->irq)); if (err) goto abort_transaction; what = "protocol"; err = xenbus_write(xbt, dev->nodename, what, XEN_IO_PROTO_ABI_NATIVE); if (err) goto abort_transaction; err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info, old_ring_size, ring_size); if (err) goto out; break; } pr_info("blkfront: %s: ring-pages=%u nr-ents=%u segs-per-req=%u\n", dev->nodename, info->ring_size, ring_size, max_segs); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info, unsigned int old_ring_size) { blkif_sring_t *sring; int err; unsigned int i, nr, ring_size; for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM); if (!info->ring_pages[nr]) break; } sring = nr == info->ring_size ? 
vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, nr * PAGE_SIZE); info->sg = kcalloc(info->max_segs_per_req, sizeof(*info->sg), GFP_KERNEL); if (!info->sg) { err = -ENOMEM; goto fail; } sg_init_table(info->sg, info->max_segs_per_req); err = -ENOMEM; ring_size = RING_SIZE(&info->ring); if (info->max_segs_per_req > BLKIF_MAX_SEGMENTS_PER_REQUEST) { if (!info->indirect_segs) old_ring_size = 0; if (old_ring_size < ring_size) { struct blkif_request_segment **segs; segs = krealloc(info->indirect_segs, ring_size * sizeof(*segs), GFP_KERNEL|__GFP_ZERO); if (!segs) goto fail; info->indirect_segs = segs; } for (i = old_ring_size; i < ring_size; ++i) { info->indirect_segs[i] = vzalloc(BLKIF_INDIRECT_PAGES(info->max_segs_per_req) * PAGE_SIZE); if (!info->indirect_segs[i]) goto fail; } } for (i = 0; i < ring_size; ++i) { unsigned long *frame; if (info->shadow[i].frame && ksize(info->shadow[i].frame) / sizeof(*frame) >= info->max_segs_per_req) continue; frame = krealloc(info->shadow[i].frame, info->max_segs_per_req * sizeof(*frame), GFP_KERNEL); if (!frame) goto fail; if (!info->shadow[i].frame) memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } err = xenbus_multi_grant_ring(dev, nr, info->ring_pages, info->ring_refs); if (err < 0) goto fail; err = bind_listening_port_to_irqhandler( dev->otherend_id, blkif_int, 0, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; DPRINTK("blkfront:backend_changed.\n"); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (talk_to_backend(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: connect(info); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: mutex_lock(&info->mutex); if (dev->state == XenbusStateClosing) { mutex_unlock(&info->mutex); break; } bd = info->gd ? 
bdget_disk(info->gd, 0) : NULL; mutex_unlock(&info->mutex); if (bd == NULL) { xenbus_frontend_closed(dev); break; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif if (bd->bd_openers) { xenbus_dev_error(dev, -EBUSY, "Device in use; refusing to close"); xenbus_switch_state(dev, XenbusStateClosing); } else blkfront_closing(info); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); break; } } /* ** Connection ** */ static void blkfront_setup_discard(struct blkfront_info *info) { unsigned int discard_granularity; unsigned int discard_alignment; int discard_secure; info->feature_discard = 1; if (!xenbus_gather(XBT_NIL, info->xbdev->otherend, "discard-granularity", "%u", &discard_granularity, "discard-alignment", "%u", &discard_alignment, NULL)) { info->discard_granularity = discard_granularity; info->discard_alignment = discard_alignment; } if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, "discard-secure", "%d", &discard_secure) != 1) discard_secure = 0; info->feature_secdiscard = !!discard_secure; } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). */ static void connect(struct blkfront_info *info) { unsigned long long sectors; unsigned int binfo, sector_size, physical_sector_size; int err, barrier, flush, discard; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors); if (err != 1) return; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sector-size", "%u", §or_size); if (err != 1) sector_size = 0; if (sector_size) blk_queue_logical_block_size(info->gd->queue, sector_size); pr_info("Setting capacity to %Lu\n", sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); /* fall through */ case BLKIF_STATE_SUSPENDED: return; } DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", §ors, "info", "%u", &binfo, "sector-size", "%u", §or_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } /* * physcial-sector-size is a newer field, so old backends may not * provide this. Assume physical sector size to be the same as * sector_size in that case. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "physical-sector-size", "%u", &physical_sector_size); if (err <= 0) physical_sector_size = sector_size; info->feature_flush = 0; info->flush_op = 0; err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (err > 0 && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; info->flush_op = BLKIF_OP_WRITE_BARRIER; } /* * And if there is "feature-flush-cache" use that above * barriers. 
*/ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-flush-cache", "%d", &flush); if (err > 0 && flush) { info->feature_flush = REQ_FLUSH; info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; } #else if (err <= 0) info->feature_flush = QUEUE_ORDERED_DRAIN; else if (barrier) info->feature_flush = QUEUE_ORDERED_TAG; else info->feature_flush = QUEUE_ORDERED_NONE; #endif err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-discard", "%d", &discard); if (err > 0 && discard) blkfront_setup_discard(info); err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, physical_sector_size, info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } err = xlvbd_sysfs_addif(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s", info->xbdev->otherend); return; } (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&info->io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); add_disk(info->gd); info->is_ready = 1; register_vcd(info); if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s is connected\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } } static void write_frontend_state_flag(const char * nodename) { int rc; rc = xenbus_write(XBT_NIL, nodename, "frontend-state", "connect"); if (rc) { printk(KERN_INFO "write frontend-state failed \n"); } } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once is this done, we can switch to Closed in * acknowledgement. */ static void blkfront_closing(struct blkfront_info *info) { unsigned long flags; DPRINTK("blkfront_closing: %d removed\n", info->vdevice); if (info->rq == NULL) goto out; spin_lock_irqsave(&info->io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&info->io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); xlvbd_sysfs_delif(info); unregister_vcd(info); xlvbd_del(info); out: if (info->xbdev) xenbus_frontend_closed(info->xbdev); } static int blkfront_remove(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); struct block_device *bd; struct gendisk *disk; DPRINTK("blkfront_remove: %s removed\n", dev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; bd = disk ? bdget_disk(disk, 0) : NULL; info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bd) { kfree(info); return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) down(&bd->bd_sem); #else mutex_lock(&bd->bd_mutex); #endif info = disk->private_data; dev_warn(disk_to_dev(disk), "%s was hot-unplugged, %d stale handles\n", dev->nodename, bd->bd_openers); if (info && !bd->bd_openers) { blkfront_closing(info); disk->private_data = NULL; kfree(info); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) up(&bd->bd_sem); #else mutex_unlock(&bd->bd_mutex); #endif bdput(bd); return 0; } static inline int GET_ID_FROM_FREELIST( struct blkfront_info *info) { unsigned long free = info->shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.id; info->shadow[free].req.id = 0x0fffffee; /* debug */ return free; } static inline int ADD_ID_TO_FREELIST( struct blkfront_info *info, unsigned long id) { if (info->shadow[id].req.id != id) return -EINVAL; if (!info->shadow[id].request) return -ENXIO; info->shadow[id].req.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; return 0; } static const char *op_name(unsigned int op) { static const char *const names[] = { [BLKIF_OP_READ] = "read", [BLKIF_OP_WRITE] = "write", [BLKIF_OP_WRITE_BARRIER] = "barrier", [BLKIF_OP_FLUSH_DISKCACHE] = "flush", [BLKIF_OP_PACKET] = "packet", [BLKIF_OP_DISCARD] = "discard", [BLKIF_OP_INDIRECT] = "indirect", }; if (op >= ARRAY_SIZE(names)) return "unknown"; return names[op] ?: "reserved"; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } static void split_request(struct blkif_request *req, struct blk_shadow *copy, const struct blkfront_info *info, const struct blkif_request_segment *segs) { unsigned int i; req->operation = copy->ind.indirect_op; req->nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; req->handle = copy->ind.handle; for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; ++i) { req->seg[i] = segs[i]; gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(copy->frame[i]), rq_data_dir(copy->request) ? GTF_readonly : 0); info->shadow[req->id].frame[i] = copy->frame[i]; } copy->ind.id = req->id; } static void kick_pending_request_queues(struct blkfront_info *info) { bool queued = false; /* Recover stage 3: Re-queue pending requests. */ while (!list_empty(&info->resume_list) && !RING_FULL(&info->ring)) { /* Grab a request slot and copy shadow state into it. */ struct blk_resume_entry *ent = list_first_entry(&info->resume_list, struct blk_resume_entry, list); blkif_request_t *req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); unsigned long *frame; unsigned int i; *req = ent->copy.req; /* We get a new request id, and must reset the shadow state. */ req->id = GET_ID_FROM_FREELIST(info); frame = info->shadow[req->id].frame; info->shadow[req->id] = ent->copy; info->shadow[req->id].req.id = req->id; info->shadow[req->id].frame = frame; /* Rewrite any grant references invalidated by susp/resume. 
*/ if (req->operation == BLKIF_OP_INDIRECT) { const blkif_request_indirect_t *ind = (void *)req; struct blkif_request_segment *segs = info->indirect_segs[req->id]; for (i = RING_SIZE(&info->ring); segs && i--; ) if (!info->indirect_segs[i]) { info->indirect_segs[i] = segs; segs = NULL; } if (segs) vfree(segs); segs = ent->indirect_segs; info->indirect_segs[req->id] = segs; if (ind->nr_segments > info->max_segs_per_req) { split_request(req, &ent->copy, info, segs); info->ring.req_prod_pvt++; queued = true; list_move_tail(&ent->list, &info->resume_split); continue; } for (i = 0; i < BLKIF_INDIRECT_PAGES(ind->nr_segments); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); gnttab_grant_foreign_access_ref( ind->indirect_grefs[i], info->xbdev->otherend_id, page_to_phys(pg) >> PAGE_SHIFT, GTF_readonly); } for (i = 0; i < ind->nr_segments; ++i) { gnttab_grant_foreign_access_ref(segs[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } } else for (i = 0; i < req->nr_segments; ++i) { gnttab_grant_foreign_access_ref(req->seg[i].gref, info->xbdev->otherend_id, pfn_to_mfn(ent->copy.frame[i]), rq_data_dir(ent->copy.request) ? GTF_readonly : 0); frame[i] = ent->copy.frame[i]; } info->ring.req_prod_pvt++; queued = true; __list_del_entry(&ent->list); kfree(ent); } /* Send off requeued requests */ if (queued) flush_requests(info); if (list_empty(&info->resume_split) && !RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *arg) { struct blkfront_info *info = container_of(arg, struct blkfront_info, work); spin_lock_irq(&info->io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_open(struct inode *inode, struct file *filep) { struct block_device *bd = inode->i_bdev; #else int blkif_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int err = 0; if (!info) /* xbdev gone */ err = -ERESTARTSYS; else { mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; mutex_unlock(&info->mutex); } return err; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_release(struct inode *inode, struct file *filep) { struct gendisk *disk = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) int blkif_release(struct gendisk *disk, fmode_t mode) { #else void blkif_release(struct gendisk *disk, fmode_t mode) { #define return(n) return #endif struct blkfront_info *info = disk->private_data; struct xenbus_device *xbdev; struct block_device *bd = bdget_disk(disk, 0); bdput(bd); if (bd->bd_openers) return(0); /* * Check if we have been instructed to close. We will have * deferred this request, because the bdev was still open. 
*/ mutex_lock(&info->mutex); xbdev = info->xbdev; if (xbdev && xbdev->state == XenbusStateClosing) { /* pending switch to state closed */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); } mutex_unlock(&info->mutex); if (!xbdev) { /* sudden device removal */ dev_info(disk_to_dev(disk), "releasing disk\n"); blkfront_closing(info); disk->private_data = NULL; kfree(info); } return(0); #undef return } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument) { struct block_device *bd = inode->i_bdev; #else int blkif_ioctl(struct block_device *bd, fmode_t mode, unsigned command, unsigned long argument) { #endif struct blkfront_info *info = bd->bd_disk->private_data; int i; DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n", command, (long)argument, inode->i_rdev); switch (command) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) case HDIO_GETGEO: { struct hd_geometry geo; int ret; if (!argument) return -EINVAL; geo.start = get_start_sect(bd); ret = blkif_getgeo(bd, &geo); if (ret) return ret; if (copy_to_user((struct hd_geometry __user *)argument, &geo, sizeof(geo))) return -EFAULT; return 0; } #endif case CDROMMULTISESSION: DPRINTK("FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: if (info->gd && (info->gd->flags & GENHD_FL_CD)) return 0; return -EINVAL; default: if (info->mi && info->gd && info->rq) { switch (info->mi->major) { case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: case SCSI_CDROM_MAJOR: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) return scsi_cmd_ioctl(filep, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return scsi_cmd_ioctl(filep, info->rq, info->gd, command, (void __user *)argument); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) return scsi_cmd_ioctl(info->rq, info->gd, mode, command, (void __user *)argument); #else return scsi_cmd_blk_ioctl(bd, mode, command, (void __user *)argument); #endif } } return -EINVAL; /* same return as native Linux */ } return 0; } int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } /* * Generate a Xen blkfront IO request from a blk layer request. Reads * and writes are handled as expected. 
* * @req: a request struct */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; unsigned long buffer_mfn; blkif_request_t *ring_req; unsigned long id; unsigned int fsect, lsect, nr_segs; int i, ref; grant_ref_t gref_head; struct scatterlist *sg; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; nr_segs = info->max_segs_per_req; if (nr_segs > BLKIF_MAX_SEGMENTS_PER_REQUEST) nr_segs += BLKIF_INDIRECT_PAGES(nr_segs); if (gnttab_alloc_grant_references(nr_segs, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, nr_segs); return 1; } /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = GET_ID_FROM_FREELIST(info); info->shadow[id].request = req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) #else if (req->cmd_flags & REQ_HARDBARRIER) #endif ring_req->operation = info->flush_op; if (req->cmd_type == REQ_TYPE_BLOCK_PC) ring_req->operation = BLKIF_OP_PACKET; if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { struct blkif_request_discard *discard = (void *)ring_req; /* id, sector_number and handle are set above. */ discard->operation = BLKIF_OP_DISCARD; discard->flag = 0; discard->handle = info->handle; discard->nr_sectors = blk_rq_sectors(req); if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard) discard->flag = BLKIF_DISCARD_SECURE; } else { struct blkif_request_segment *segs; nr_segs = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(nr_segs > info->max_segs_per_req); if (nr_segs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ring_req->nr_segments = nr_segs; ring_req->handle = info->handle; segs = ring_req->seg; } else { struct blkif_request_indirect *ind = (void *)ring_req; ind->indirect_op = ring_req->operation; ind->operation = BLKIF_OP_INDIRECT; ind->nr_segments = nr_segs; ind->handle = info->handle; segs = info->indirect_segs[id]; for (i = 0; i < BLKIF_INDIRECT_PAGES(nr_segs); ++i) { void *va = (void *)segs + i * PAGE_SIZE; struct page *pg = vmalloc_to_page(va); buffer_mfn = page_to_phys(pg) >> PAGE_SHIFT; ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, GTF_readonly); ind->indirect_grefs[i] = ref; } } for_each_sg(info->sg, sg, nr_segs, i) { buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT; fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; /* install a grant reference. */ ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); gnttab_grant_foreign_access_ref( ref, info->xbdev->otherend_id, buffer_mfn, rq_data_dir(req) ? GTF_readonly : 0 ); info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); segs[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
*/ info->shadow[id].req = *ring_req; gnttab_free_grant_references(gref_head); return 0; } /* * do_blkif_request * read a block; request is in a request queue */ void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; DPRINTK("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if ((req->cmd_type != REQ_TYPE_FS && (req->cmd_type != REQ_TYPE_BLOCK_PC || req->cmd_len)) || ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) && !info->flush_op)) { req->errors = (DID_ERROR << 16) | (DRIVER_INVALID << 24); __blk_end_request_all(req, -EIO); continue; } #ifndef OPENSUSE_1302 DPRINTK("do_blk_req %p: cmd %p, sec %llx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); #else DPRINTK("do_blk_req %p: cmd %p, sec %llx, (%u/%u) [%s]\n", req, req->cmd, (long long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), rq_data_dir(req) ? "write" : "read"); #endif if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static irqreturn_t blkif_int(int irq, void *dev_id) { struct request *req; blkif_response_t *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; int ret; bool done; bret = RING_GET_RESPONSE(&info->ring, i); if (unlikely(bret->id >= RING_SIZE(&info->ring))) { /* * The backend has messed up and given us an id that * we would never have given to it (we stamp it up to * RING_SIZE() - see GET_ID_FROM_FREELIST()). */ pr_warning("%s: response to %s has incorrect id (%#Lx)\n", info->gd->disk_name, op_name(bret->operation), (unsigned long long)bret->id); continue; } id = bret->id; req = info->shadow[id].request; done = blkif_completion(info, id, bret->status); ret = ADD_ID_TO_FREELIST(info, id); if (unlikely(ret)) { pr_warning("%s: id %#lx (response to %s) couldn't be recycled (%d)!\n", info->gd->disk_name, id, op_name(bret->operation), ret); continue; } if (!done) continue; ret = bret->status == BLKIF_RSP_OKAY ? 0 : -EIO; switch (bret->operation) { const char *kind; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: kind = ""; if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) ret = -EOPNOTSUPP; if (unlikely(bret->status == BLKIF_RSP_ERROR && !(info->shadow[id].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[id].ind.nr_segments : info->shadow[id].req.nr_segments))) { kind = "empty "; ret = -EOPNOTSUPP; } if (unlikely(ret)) { if (ret == -EOPNOTSUPP) { pr_warn("blkfront: %s: %s%s op failed\n", info->gd->disk_name, kind, op_name(bret->operation)); ret = 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) info->feature_flush = 0; #else info->feature_flush = QUEUE_ORDERED_NONE; #endif xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_PACKET: if (unlikely(bret->status != BLKIF_RSP_OKAY)) DPRINTK("Bad return from blkdev %s request: %d\n", op_name(bret->operation), bret->status); __blk_end_request_all(req, ret); break; case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; pr_warn("blkfront: %s: discard op failed\n", info->gd->disk_name); ret = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } __blk_end_request_all(req, ret); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&info->io_lock, flags); return IRQ_HANDLED; } static void blkif_free(struct blkfront_info *info, int suspend) { /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&info->io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). */ if (info->rq) blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&info->io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_work(&info->work); /* Free resources associated with old device channel. 
*/ if (!suspend) { unsigned int i; if (info->indirect_segs) { for (i = 0; i < RING_SIZE(&info->ring); ++i) if (info->indirect_segs[i]) vfree(info->indirect_segs[i]); kfree(info->indirect_segs); info->indirect_segs = NULL; } for (i = 0; i < ARRAY_SIZE(info->shadow); ++i) kfree(info->shadow[i].frame); } vunmap(info->ring.sring); info->ring.sring = NULL; gnttab_multi_end_foreign_access(info->ring_size, info->ring_refs, info->ring_pages); if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; kfree(info->sg); info->sg = NULL; info->max_segs_per_req = 0; } static bool blkif_completion(struct blkfront_info *info, unsigned long id, int status) { struct blk_shadow *s = &info->shadow[id]; switch (s->req.operation) { unsigned int i, j; case BLKIF_OP_DISCARD: break; case BLKIF_OP_INDIRECT: { struct blkif_request_segment *segs = info->indirect_segs[id]; struct blk_resume_entry *tmp, *ent = NULL; list_for_each_entry(tmp, &info->resume_split, list) if (tmp->copy.ind.id == id) { ent = tmp; __list_del_entry(&ent->list); break; } if (!ent) { for (i = 0; i < s->ind.nr_segments; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); gnttab_multi_end_foreign_access( BLKIF_INDIRECT_PAGES(s->ind.nr_segments), s->ind.indirect_grefs, NULL); break; } for (i = 0; i < BLKIF_MAX_SEGMENTS_PER_REQUEST; i++) gnttab_end_foreign_access(segs[i].gref, 0UL); if (status != BLKIF_RSP_OKAY) { kfree(ent); break; } for (j = 0; i < ent->copy.ind.nr_segments; ++i, ++j) segs[j] = segs[i]; ent->copy.frame += BLKIF_MAX_SEGMENTS_PER_REQUEST; ent->copy.ind.nr_segments -= BLKIF_MAX_SEGMENTS_PER_REQUEST; if (ent->copy.ind.nr_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { ent->copy.req.operation = ent->copy.ind.indirect_op; ent->copy.req.nr_segments = ent->copy.ind.nr_segments; ent->copy.req.handle = ent->copy.ind.handle; for (i = 0; i < ent->copy.req.nr_segments; ++i) ent->copy.req.seg[i] = segs[i]; } list_add_tail(&ent->list, &info->resume_list); return false; } default: for (i = 0; i < s->req.nr_segments; i++) gnttab_end_foreign_access(s->req.seg[i].gref, 0UL); break; } return true; } static int blkif_recover(struct blkfront_info *info, unsigned int old_ring_size, unsigned int ring_size) { unsigned int i; struct blk_resume_entry *ent; LIST_HEAD(list); /* Stage 1: Make a safe copy of the shadow state. */ for (i = 0; i < old_ring_size; i++) { unsigned int nr_segs; /* Not in use? */ if (!info->shadow[i].request) continue; switch (info->shadow[i].req.operation) { default: nr_segs = info->shadow[i].req.nr_segments; break; case BLKIF_OP_INDIRECT: nr_segs = info->shadow[i].ind.nr_segments; break; case BLKIF_OP_DISCARD: nr_segs = 0; break; } ent = kmalloc(sizeof(*ent) + nr_segs * sizeof(*ent->copy.frame), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH); if (!ent) break; ent->copy = info->shadow[i]; ent->copy.frame = (void *)(ent + 1); memcpy(ent->copy.frame, info->shadow[i].frame, nr_segs * sizeof(*ent->copy.frame)); if (info->indirect_segs) { ent->indirect_segs = info->indirect_segs[i]; info->indirect_segs[i] = NULL; } else ent->indirect_segs = NULL; list_add_tail(&ent->list, &list); } if (i < old_ring_size) { while (!list_empty(&list)) { ent = list_first_entry(&list, struct blk_resume_entry, list); __list_del_entry(&ent->list); kfree(ent); } return -ENOMEM; } list_splice_tail(&list, &info->resume_list); /* Stage 2: Set up free list. 
*/ for (i = 0; i < old_ring_size; ++i) { unsigned long *frame = info->shadow[i].frame; memset(info->shadow + i, 0, sizeof(*info->shadow)); memset(frame, ~0, info->max_segs_per_req * sizeof(*frame)); info->shadow[i].frame = frame; } shadow_init(info->shadow, ring_size); info->shadow_free = info->ring.req_prod_pvt; (void)xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&info->io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); spin_unlock_irq(&info->io_lock); if (info->indirect_segs) for (i = ring_size; i < old_ring_size; ++i) if (info->indirect_segs[i]) { vfree(info->indirect_segs[i]); info->indirect_segs[i] = NULL; } if (strstr(info->xbdev->nodename,"vbd")) { printk(KERN_ERR "%s has been blkif_recovered\n",info->xbdev->nodename); write_frontend_state_flag(info->xbdev->nodename); } return 0; } int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } /* ** Driver Registration ** */ static const struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; MODULE_ALIAS("xen:vbd"); static DEFINE_XENBUS_DRIVER(blkfront, , .probe = blkfront_probe, .remove = blkfront_remove, .resume = blkfront_resume, .otherend_changed = backend_changed, .is_ready = blkfront_is_ready, ); static int __init xlblk_init(void) { if (!is_running_on_xen()) return -ENODEV; return xenbus_register_frontend(&blkfront_driver); } module_init(xlblk_init); static void __exit xlblk_exit(void) { xenbus_unregister_driver(&blkfront_driver); xlbd_release_major_info(); } module_exit(xlblk_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/opensuse/block.h000066400000000000000000000137141314037446600306560ustar00rootroot00000000000000/****************************************************************************** * block.h * * Shared definitions between all levels of XenLinux Virtual block devices. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_BLOCK_H__ #define __XEN_DRIVERS_BLOCK_H__ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DPRINTK(_f, _a...) pr_debug(_f, ## _a) #define DPRINTK_IOCTL(_f, _a...) ((void)0) struct xlbd_major_info { int major; int index; int usage; const struct xlbd_type_info *type; struct xlbd_minor_state *minors; }; struct blk_shadow { union { blkif_request_t req; blkif_request_indirect_t ind; }; struct request *request; unsigned long *frame; }; #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct xenbus_device *xbdev; struct gendisk *gd; struct mutex mutex; int vdevice; blkif_vdev_t handle; int connected; unsigned int ring_size; blkif_front_ring_t ring; spinlock_t io_lock; struct scatterlist *sg; struct blkif_request_segment **indirect_segs; unsigned int irq; unsigned int max_segs_per_req; struct xlbd_major_info *mi; struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; struct list_head resume_list, resume_split; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; unsigned long shadow_free; unsigned int feature_flush; unsigned int flush_op; bool feature_discard; bool feature_secdiscard; unsigned int discard_granularity; unsigned int discard_alignment; int is_ready; }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) extern int blkif_open(struct inode *inode, struct file *filep); extern int blkif_release(struct inode *inode, struct file *filep); extern int blkif_ioctl(struct inode *inode, struct file *filep, unsigned command, unsigned long argument); #else extern int blkif_open(struct block_device *bdev, fmode_t mode); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) extern int blkif_release(struct gendisk *disk, fmode_t mode); #else extern void blkif_release(struct gendisk *disk, fmode_t mode); #endif extern int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument); #endif extern int blkif_getgeo(struct block_device *, struct hd_geometry *); extern int blkif_check(dev_t dev); extern int blkif_revalidate(dev_t dev); extern void do_blkif_request (struct request_queue *rq); /* Virtual block-device subsystem. */ /* Note that xlvbd_add doesn't call add_disk for you: you're expected to call add_disk on info->gd once the disk is properly connected up. 
*/ int xlvbd_add(blkif_sector_t capacity, int device, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *); void xlvbd_del(struct blkfront_info *info); void xlvbd_flush(struct blkfront_info *info); #ifdef CONFIG_SYSFS int xlvbd_sysfs_addif(struct blkfront_info *info); void xlvbd_sysfs_delif(struct blkfront_info *info); #else static inline int xlvbd_sysfs_addif(struct blkfront_info *info) { return 0; } static inline void xlvbd_sysfs_delif(struct blkfront_info *info) { ; } #endif void xlbd_release_major_info(void); /* Virtual cdrom block-device */ #ifdef CONFIG_XEN extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #else //static inline void register_vcd(struct blkfront_info *info) {} //static inline void unregister_vcd(struct blkfront_info *info) {} #endif extern void register_vcd(struct blkfront_info *info); extern void unregister_vcd(struct blkfront_info *info); #endif /* __XEN_DRIVERS_BLOCK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/opensuse/vbd.c000066400000000000000000000377541314037446600303440ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "block.h" #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_physical_block_size(rq, physical_sector_size); blk_queue_max_hw_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, info->max_segs_per_req); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } static inline bool xlvbd_ide_unplugged(int major) { #ifndef CONFIG_XEN /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=ide-disks|all * - native driver required with xen_emul_unplug=nics|never */ if (xen_pvonhvm_unplugged_disks) return true; switch (major) { case IDE0_MAJOR: case IDE1_MAJOR: case IDE2_MAJOR: case IDE3_MAJOR: case IDE4_MAJOR: case IDE5_MAJOR: case IDE6_MAJOR: case IDE7_MAJOR: case IDE8_MAJOR: case IDE9_MAJOR: return false; default: break; } #endif return true; } int xlvbd_add(blkif_sector_t capacity, int vdevice, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } if (!xlvbd_ide_unplugged(major)) { pr_warning("blkfront: skipping IDE major on %x, native driver required\n", vdevice); return -ENODEV; } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 
1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? "flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/opensuse/vcd.c000066400000000000000000000313111314037446600303240ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); 
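/* The open message carries this device's xenstore otherend path as its payload; submit_message() sends it to the backend as a packet-command (REQ_TYPE_BLOCK_PC) request on the ordinary blkfront request queue. */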
submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #else static void xencdrom_block_release(struct gendisk *gd, 
fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int ret = 0; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) # define ret 0 #else # define ret #endif if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; #undef ret } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? 
cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/vbd.c000066400000000000000000000377541314037446600265030ustar00rootroot00000000000000/****************************************************************************** * vbd.c * * XenLinux virtual block-device driver (xvd). * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004-2005, Christian Limpach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "block.h" #include #include #include #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) #define XENVBD_MAJOR 202 #endif #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<= KERNEL_VERSION(2,6,16) .getgeo = blkif_getgeo #endif }; static struct xlbd_major_info * xlbd_alloc_major_info(int major, int minor, int index) { struct xlbd_major_info *ptr; struct xlbd_minor_state *minors; int do_register; ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL); if (ptr == NULL) return NULL; ptr->major = major; minors = kmalloc(sizeof(*minors), GFP_KERNEL); if (minors == NULL) { kfree(ptr); return NULL; } minors->bitmap = kzalloc(BITS_TO_LONGS(256) * sizeof(*minors->bitmap), GFP_KERNEL); if (minors->bitmap == NULL) { kfree(minors); kfree(ptr); return NULL; } spin_lock_init(&minors->lock); minors->nr = 256; do_register = 1; switch (index) { case XLBD_MAJOR_IDE_RANGE: ptr->type = &xlbd_ide_type; ptr->index = index - XLBD_MAJOR_IDE_START; break; case XLBD_MAJOR_SD_RANGE: ptr->type = &xlbd_sd_type; ptr->index = index - XLBD_MAJOR_SD_START; break; case XLBD_MAJOR_SR_RANGE: ptr->type = &xlbd_sr_type; ptr->index = index - XLBD_MAJOR_SR_START; break; case XLBD_MAJOR_VBD_RANGE: ptr->index = 0; if ((index - XLBD_MAJOR_VBD_START) == 0) ptr->type = &xlbd_vbd_type; else ptr->type = &xlbd_vbd_type_ext; /* * if someone already registered block major XENVBD_MAJOR, * don't try to register it again */ if (major_info[XLBD_MAJOR_VBD_ALT(index)] != NULL) { kfree(minors->bitmap); kfree(minors); minors = major_info[XLBD_MAJOR_VBD_ALT(index)]->minors; do_register = 0; } break; } if (do_register) { if (register_blkdev(ptr->major, ptr->type->devname)) { kfree(minors->bitmap); kfree(minors); kfree(ptr); return NULL; } pr_info("xen-vbd: registered block device major %i\n", ptr->major); } ptr->minors = minors; major_info[index] = ptr; return ptr; } static struct xlbd_major_info * xlbd_get_major_info(int major, int minor, int vdevice) { struct xlbd_major_info *mi; int index; switch (major) { case IDE0_MAJOR: index = 0; break; case IDE1_MAJOR: index = 1; break; case IDE2_MAJOR: index = 2; break; case IDE3_MAJOR: index = 3; break; case IDE4_MAJOR: index = 4; break; case IDE5_MAJOR: index = 5; break; case IDE6_MAJOR: index = 6; break; case IDE7_MAJOR: index = 7; break; case IDE8_MAJOR: index = 8; break; case IDE9_MAJOR: index = 9; break; case SCSI_DISK0_MAJOR: index = XLBD_MAJOR_SD_START; break; case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: index = XLBD_MAJOR_SD_START + 1 + major - SCSI_DISK1_MAJOR; break; case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR: index = XLBD_MAJOR_SD_START + 8 + major - SCSI_DISK8_MAJOR; break; case SCSI_CDROM_MAJOR: index = XLBD_MAJOR_SR_START; break; case XENVBD_MAJOR: index = XLBD_MAJOR_VBD_START + !!VDEV_IS_EXTENDED(vdevice); break; default: return NULL; } mi = ((major_info[index] != NULL) ? 
major_info[index] : xlbd_alloc_major_info(major, minor, index)); if (mi) mi->usage++; return mi; } static void xlbd_put_major_info(struct xlbd_major_info *mi) { mi->usage--; /* XXX: release major if 0 */ } void __exit xlbd_release_major_info(void) { unsigned int i; int vbd_done = 0; for (i = 0; i < ARRAY_SIZE(major_info); ++i) { struct xlbd_major_info *mi = major_info[i]; if (!mi) continue; if (mi->usage) pr_warning("vbd: major %u still in use (%u times)\n", mi->major, mi->usage); if (mi->major != XENVBD_MAJOR || !vbd_done) { unregister_blkdev(mi->major, mi->type->devname); kfree(mi->minors->bitmap); kfree(mi->minors); } if (mi->major == XENVBD_MAJOR) vbd_done = 1; kfree(mi); } } static int xlbd_reserve_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; unsigned int end = minor + nr_minors; int rc; if (end > ms->nr) { unsigned long *bitmap, *old; bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&ms->lock); if (end > ms->nr) { old = ms->bitmap; memcpy(bitmap, ms->bitmap, BITS_TO_LONGS(ms->nr) * sizeof(*bitmap)); ms->bitmap = bitmap; ms->nr = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&ms->lock); kfree(old); } spin_lock(&ms->lock); if (find_next_bit(ms->bitmap, end, minor) >= end) { bitmap_set(ms->bitmap, minor, nr_minors); rc = 0; } else rc = -EBUSY; spin_unlock(&ms->lock); return rc; } static void xlbd_release_minors(struct xlbd_major_info *mi, unsigned int minor, unsigned int nr_minors) { struct xlbd_minor_state *ms = mi->minors; BUG_ON(minor + nr_minors > ms->nr); spin_lock(&ms->lock); bitmap_clear(ms->bitmap, minor, nr_minors); spin_unlock(&ms->lock); } static char *encode_disk_name(char *ptr, unsigned int n) { if (n >= 26) ptr = encode_disk_name(ptr, n / 26 - 1); *ptr = 'a' + n % 26; return ptr + 1; } static int xlvbd_init_blk_queue(struct gendisk *gd, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &info->io_lock); if (rq == NULL) return -1; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); #endif if (info->feature_discard) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_physical_block_size(rq, physical_sector_size); blk_queue_max_hw_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ blk_queue_max_segments(rq, info->max_segs_per_req); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. 
*/ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; info->rq = rq; return 0; } static inline bool xlvbd_ide_unplugged(int major) { #ifndef CONFIG_XEN /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=ide-disks|all * - native driver required with xen_emul_unplug=nics|never */ if (xen_pvonhvm_unplugged_disks) return true; switch (major) { case IDE0_MAJOR: case IDE1_MAJOR: case IDE2_MAJOR: case IDE3_MAJOR: case IDE4_MAJOR: case IDE5_MAJOR: case IDE6_MAJOR: case IDE7_MAJOR: case IDE8_MAJOR: case IDE9_MAJOR: return false; default: break; } #endif return true; } int xlvbd_add(blkif_sector_t capacity, int vdevice, unsigned int vdisk_info, unsigned int sector_size, unsigned int physical_sector_size, struct blkfront_info *info) { int major, minor; struct gendisk *gd; struct xlbd_major_info *mi; int nr_minors = 1; int err = -ENODEV; char *ptr; unsigned int offset; if ((vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ pr_warning("blkfront: vdevice %#x is above the extended range;" " ignoring\n", vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(vdevice)) { major = BLKIF_MAJOR(vdevice); minor = BLKIF_MINOR(vdevice); } else { major = XENVBD_MAJOR; minor = BLKIF_MINOR_EXT(vdevice); if (minor >> MINORBITS) { pr_warning("blkfront: %#x's minor (%#x) out of range;" " ignoring\n", vdevice, minor); return -ENODEV; } } if (!xlvbd_ide_unplugged(major)) { pr_warning("blkfront: skipping IDE major on %x, native driver required\n", vdevice); return -ENODEV; } BUG_ON(info->gd != NULL); BUG_ON(info->mi != NULL); BUG_ON(info->rq != NULL); mi = xlbd_get_major_info(major, minor, vdevice); if (mi == NULL) goto out; info->mi = mi; if ((vdisk_info & VDISK_CDROM) || !(minor & ((1 << mi->type->partn_shift) - 1))) nr_minors = 1 << mi->type->partn_shift; err = xlbd_reserve_minors(mi, minor & ~(nr_minors - 1), nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(vdisk_info & VDISK_CDROM ? 1 : nr_minors); if (gd == NULL) goto release; strcpy(gd->disk_name, mi->type->diskname); ptr = gd->disk_name + strlen(mi->type->diskname); offset = mi->index * mi->type->disks_per_major + (minor >> mi->type->partn_shift); if (mi->type->partn_shift) { ptr = encode_disk_name(ptr, offset); offset = minor & ((1 << mi->type->partn_shift) - 1); } else gd->flags |= GENHD_FL_CD; BUG_ON(ptr >= gd->disk_name + ARRAY_SIZE(gd->disk_name)); if (nr_minors > 1) *ptr = 0; else snprintf(ptr, gd->disk_name + ARRAY_SIZE(gd->disk_name) - ptr, "%u", offset); gd->major = mi->major; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, info)) { del_gendisk(gd); goto release; } info->gd = gd; xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(mi, minor, nr_minors); out: if (mi) xlbd_put_major_info(mi); info->mi = NULL; return err; } void xlvbd_del(struct blkfront_info *info) { unsigned int minor, nr_minors; if (info->mi == NULL) return; BUG_ON(info->gd == NULL); minor = info->gd->first_minor; nr_minors = (info->gd->flags & GENHD_FL_CD) || !(minor & ((1 << info->mi->type->partn_shift) - 1)) ? 
1 << info->mi->type->partn_shift : 1; del_gendisk(info->gd); put_disk(info->gd); info->gd = NULL; xlbd_release_minors(info->mi, minor & ~(nr_minors - 1), nr_minors); xlbd_put_major_info(info->mi); info->mi = NULL; BUG_ON(info->rq == NULL); blk_cleanup_queue(info->rq); info->rq = NULL; } void xlvbd_flush(struct blkfront_info *info) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) blk_queue_flush(info->rq, info->feature_flush); pr_info("blkfront: %s: %s: %s\n", info->gd->disk_name, info->flush_op == BLKIF_OP_WRITE_BARRIER ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? "flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled"); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) int err; const char *barrier; switch (info->feature_flush) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_flush); if (err) return err; pr_info("blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); #else if (info->feature_flush) pr_info("blkfront: %s: barriers disabled\n", info->gd->disk_name); #endif } #ifdef CONFIG_SYSFS static ssize_t show_media(struct device *dev, struct device_attribute *attr, char *buf) { struct xenbus_device *xendev = to_xenbus_device(dev); struct blkfront_info *info = dev_get_drvdata(&xendev->dev); if (info->gd->flags & GENHD_FL_CD) return sprintf(buf, "cdrom\n"); return sprintf(buf, "disk\n"); } static struct device_attribute xlvbd_attrs[] = { __ATTR(media, S_IRUGO, show_media, NULL), }; int xlvbd_sysfs_addif(struct blkfront_info *info) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) { error = device_create_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); return error; } void xlvbd_sysfs_delif(struct blkfront_info *info) { int i; for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]); } #endif /* CONFIG_SYSFS */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vbd/vcd.c000066400000000000000000000313111314037446600264630ustar00rootroot00000000000000/******************************************************************************* * vcd.c * * Implements CDROM cmd packet passing between frontend guest and backend driver. * * Copyright (c) 2008, Pat Campell plc@novell.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include "block.h" /* List of cdrom_device_info, can have as many as blkfront supports */ struct vcd_disk { struct list_head vcd_entry; struct cdrom_device_info vcd_cdrom_info; spinlock_t vcd_cdrom_info_lock; }; static LIST_HEAD(vcd_disks); static DEFINE_SPINLOCK(vcd_disks_lock); static struct vcd_disk *xencdrom_get_list_entry(struct gendisk *disk) { struct vcd_disk *ret_vcd = NULL; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { spin_lock(&vcd->vcd_cdrom_info_lock); ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd; } static void submit_message(struct blkfront_info *info, void *sp) { struct request *req = NULL; req = blk_get_request(info->rq, READ, __GFP_WAIT); if (blk_rq_map_kern(info->rq, req, sp, PAGE_SIZE, __GFP_WAIT)) goto out; req->rq_disk = info->gd; #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_NOMERGE; #else req->flags |= REQ_BLOCK_PC; #endif req->__sector = 0; req->cmd_len = 0; req->timeout = 60*HZ; blk_execute_rq(req->q, info->gd, req, 1); out: blk_put_request(req); } static int submit_cdrom_cmd(struct blkfront_info *info, struct packet_command *cgc) { int ret = 0; struct page *page; union xen_block_packet *sp; struct xen_cdrom_packet *xcp; struct vcd_generic_command *vgc; if (cgc->buffer && cgc->buflen > MAX_PACKET_DATA) { pr_warn("%s() Packet buffer length is to large \n", __func__); return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcp = &(sp->xcp); xcp->type = XEN_TYPE_CDROM_PACKET; xcp->payload_offset = PACKET_PAYLOAD_OFFSET; vgc = (struct vcd_generic_command *)((char *)sp + xcp->payload_offset); memcpy(vgc->cmd, cgc->cmd, CDROM_PACKET_SIZE); vgc->stat = cgc->stat; vgc->data_direction = cgc->data_direction; vgc->quiet = cgc->quiet; vgc->timeout = cgc->timeout; if (cgc->sense) { vgc->sense_offset = PACKET_SENSE_OFFSET; memcpy((char *)sp + vgc->sense_offset, cgc->sense, sizeof(struct request_sense)); } if (cgc->buffer) { vgc->buffer_offset = PACKET_BUFFER_OFFSET; memcpy((char *)sp + vgc->buffer_offset, cgc->buffer, cgc->buflen); vgc->buflen = cgc->buflen; } submit_message(info,sp); if (xcp->ret) ret = xcp->err; if (cgc->sense) memcpy(cgc->sense, (char *)sp + PACKET_SENSE_OFFSET, sizeof(struct request_sense)); if (cgc->buffer && cgc->buflen) memcpy(cgc->buffer, (char *)sp + PACKET_BUFFER_OFFSET, cgc->buflen); __free_page(page); return ret; } static int xencdrom_open(struct cdrom_device_info *cdi, int purpose) { int ret = 0; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_open *xco; info = cdi->disk->private_data; if (!info->xbdev) return -ENODEV; if (strlen(info->xbdev->otherend) > MAX_PACKET_DATA) { return -EIO; } page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xco = &(sp->xco); xco->type = XEN_TYPE_CDROM_OPEN; xco->payload_offset = sizeof(struct xen_cdrom_open); strcpy((char *)sp + xco->payload_offset, info->xbdev->otherend); 
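/* The backend's reply is inspected below: xco->ret/xco->err propagate failures, and when media is reported present the disk capacity is updated from xco->sectors. */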
submit_message(info,sp); if (xco->ret) { ret = xco->err; goto out; } if (xco->media_present) set_capacity(cdi->disk, xco->sectors); out: __free_page(page); return ret; } static void xencdrom_release(struct cdrom_device_info *cdi) { } static int xencdrom_media_changed(struct cdrom_device_info *cdi, int disc_nr) { int ret; struct page *page; struct blkfront_info *info; union xen_block_packet *sp; struct xen_cdrom_media_changed *xcmc; info = cdi->disk->private_data; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcmc = &(sp->xcmc); xcmc->type = XEN_TYPE_CDROM_MEDIA_CHANGED; submit_message(info,sp); ret = xcmc->media_changed; __free_page(page); return ret; } static int xencdrom_tray_move(struct cdrom_device_info *cdi, int position) { struct packet_command cgc; struct blkfront_info *info; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_START_STOP_UNIT; if (position) cgc.cmd[4] = 2; else cgc.cmd[4] = 3; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_lock_door(struct cdrom_device_info *cdi, int lock) { struct blkfront_info *info; struct packet_command cgc; info = cdi->disk->private_data; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; cgc.cmd[4] = lock; return submit_cdrom_cmd(info, &cgc); } static int xencdrom_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { return cgc->stat = submit_cdrom_cmd(cdi->disk->private_data, cgc); } static int xencdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) { return -EINVAL; } /* Query backend to see if CDROM packets are supported */ static int xencdrom_supported(struct blkfront_info *info) { struct page *page; union xen_block_packet *sp; struct xen_cdrom_support *xcs; page = alloc_page(GFP_NOIO|__GFP_ZERO); if (!page) { pr_crit("%s() Unable to allocate page\n", __func__); return -ENOMEM; } sp = page_address(page); xcs = &(sp->xcs); xcs->type = XEN_TYPE_CDROM_SUPPORT; submit_message(info,sp); return xcs->supported; } static struct cdrom_device_ops xencdrom_dops = { .open = xencdrom_open, .release = xencdrom_release, .media_changed = xencdrom_media_changed, .tray_move = xencdrom_tray_move, .lock_door = xencdrom_lock_door, .generic_packet = xencdrom_packet, .audio_ioctl = xencdrom_audio_ioctl, .capability = (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | \ CDC_MEDIA_CHANGED | CDC_GENERIC_PACKET | CDC_DVD | \ CDC_CD_R), .n_minors = 1, }; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_open(struct inode *inode, struct file *file) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_open(struct block_device *bd, fmode_t mode) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!info->xbdev) return -ENODEV; if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_open(&vcd->vcd_cdrom_info, inode, file); #else ret = cdrom_open(&vcd->vcd_cdrom_info, bd, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); } return ret; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_release(struct inode *inode, struct file *file) { struct gendisk *gd = inode->i_bdev->bd_disk; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) static int xencdrom_block_release(struct gendisk *gd, fmode_t mode) { #else static void xencdrom_block_release(struct gendisk *gd, 
fmode_t mode) { #endif struct blkfront_info *info = gd->private_data; struct vcd_disk *vcd; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int ret = 0; #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) # define ret 0 #else # define ret #endif if ((vcd = xencdrom_get_list_entry(info->gd))) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ret = cdrom_release(&vcd->vcd_cdrom_info, file); #else cdrom_release(&vcd->vcd_cdrom_info, mode); #endif spin_unlock(&vcd->vcd_cdrom_info_lock); if (vcd->vcd_cdrom_info.use_count == 0) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) blkif_release(inode, file); #else blkif_release(gd, mode); #endif } } return ret; #undef ret } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) static int xencdrom_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { struct block_device *bd = inode->i_bdev; #else static int xencdrom_block_ioctl(struct block_device *bd, fmode_t mode, unsigned cmd, unsigned long arg) { #endif struct blkfront_info *info = bd->bd_disk->private_data; struct vcd_disk *vcd; int ret = 0; if (!(vcd = xencdrom_get_list_entry(info->gd))) goto out; switch (cmd) { case 2285: /* SG_IO */ ret = -ENOSYS; break; case CDROMEJECT: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 1); break; case CDROMCLOSETRAY: ret = xencdrom_tray_move(&vcd->vcd_cdrom_info, 0); break; case CDROM_GET_CAPABILITY: ret = vcd->vcd_cdrom_info.ops->capability & ~vcd->vcd_cdrom_info.mask; break; case CDROM_SET_OPTIONS: ret = vcd->vcd_cdrom_info.options; break; case CDROM_SEND_PACKET: { struct packet_command cgc; ret = copy_from_user(&cgc, (void __user *)arg, sizeof(cgc)) ? -EFAULT : submit_cdrom_cmd(info, &cgc); break; } default: spin_unlock(&vcd->vcd_cdrom_info_lock); out: #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) return blkif_ioctl(inode, file, cmd, arg); #else return blkif_ioctl(bd, mode, cmd, arg); #endif } spin_unlock(&vcd->vcd_cdrom_info_lock); return ret; } /* Called as result of cdrom_open, vcd_cdrom_info_lock already held */ static int xencdrom_block_media_changed(struct gendisk *disk) { struct vcd_disk *vcd; struct vcd_disk *ret_vcd = NULL; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == disk) { ret_vcd = vcd; break; } } spin_unlock(&vcd_disks_lock); return ret_vcd ? 
cdrom_media_changed(&ret_vcd->vcd_cdrom_info) : 0; } static const struct block_device_operations xencdrom_bdops = { .owner = THIS_MODULE, .open = xencdrom_block_open, .release = xencdrom_block_release, .ioctl = xencdrom_block_ioctl, .media_changed = xencdrom_block_media_changed, }; void register_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; /* Make sure this is for a CD device */ if (!(gd->flags & GENHD_FL_CD)) goto out; /* Make sure we have backend support */ if (!xencdrom_supported(info)) goto out; /* Create new vcd_disk and fill in cdrom_info */ vcd = kzalloc(sizeof(*vcd), GFP_KERNEL); if (!vcd) { pr_info("%s(): Unable to allocate vcd struct!\n", __func__); goto out; } spin_lock_init(&vcd->vcd_cdrom_info_lock); vcd->vcd_cdrom_info.ops = &xencdrom_dops; vcd->vcd_cdrom_info.speed = 4; vcd->vcd_cdrom_info.capacity = 1; vcd->vcd_cdrom_info.options = 0; strlcpy(vcd->vcd_cdrom_info.name, gd->disk_name, ARRAY_SIZE(vcd->vcd_cdrom_info.name)); vcd->vcd_cdrom_info.mask = (CDC_CD_RW | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_SELECT_SPEED | CDC_MRW | CDC_MRW_W | CDC_RAM); if (register_cdrom(&(vcd->vcd_cdrom_info)) != 0) { pr_warn("%s() Cannot register blkdev as a cdrom %d!\n", __func__, gd->major); goto err_out; } gd->fops = &xencdrom_bdops; vcd->vcd_cdrom_info.disk = gd; spin_lock(&vcd_disks_lock); list_add(&(vcd->vcd_entry), &vcd_disks); spin_unlock(&vcd_disks_lock); out: return; err_out: kfree(vcd); } void unregister_vcd(struct blkfront_info *info) { struct gendisk *gd = info->gd; struct vcd_disk *vcd; spin_lock(&vcd_disks_lock); list_for_each_entry(vcd, &vcd_disks, vcd_entry) { if (vcd->vcd_cdrom_info.disk == gd) { spin_lock(&vcd->vcd_cdrom_info_lock); unregister_cdrom(&vcd->vcd_cdrom_info); list_del(&vcd->vcd_entry); spin_unlock(&vcd->vcd_cdrom_info_lock); kfree(vcd); break; } } spin_unlock(&vcd_disks_lock); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/000077500000000000000000000000001314037446600257335ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.28/000077500000000000000000000000001314037446600265465ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.28/accel.c000066400000000000000000000533571314037446600277760ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void 
accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. 
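 * The hooks argument may be NULL (plugin not yet loaded); netfront_accelerator_loaded() supplies the real hooks later.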
*/ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. 
*/ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. */ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. 
*/ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats, this_cpu_ptr(vif_state->np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. */ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats, this_cpu_ptr(np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. 
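 *
 * Added note: suspend calls do_remove() but deliberately leaves the
 * vif_state on the accelerator's list, so a subsequent suspend_cancel
 * can simply re-fire the accelerator watch to reinstate the configured
 * plugin.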
*/ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. 
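 * Added note: the hooks pointer and np->accelerator are re-checked
 * under vif_states_lock, so a concurrent accelerator unload between
 * the unlocked test and the locked call cannot leave us invoking a
 * stale hook.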
*/ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats, this_cpu_ptr(np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.28/netfront.c000066400000000000000000001751061314037446600305630ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef OPENSUSE_1302 #include #endif #include #include #include #include #include #include #include struct netfront_cb { unsigned int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* not support balloon for openSUSE 13.2*/ #ifdef OPENSUSE_1302 void balloon_update_driver_allowance(long delta) { /* nothing */ } void balloon_release_driver_page(struct page *page) { /* nothing */ } #endif /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. 
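 *
 * Example (illustrative only; the module name depends on how this
 * driver is packaged, "xen-vnif" is merely an assumption here):
 *   modprobe xen-vnif rx_copy=1    - request the copying receive path
 *   modprobe xen-vnif rx_flip=1    - request the page-flipping path
 * Whichever is requested is reconciled in network_connect() against
 * the backend's feature-rx-copy / feature-rx-flip xenstore keys.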
*/ #ifdef CONFIG_XEN static bool MODPARM_rx_copy; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static bool MODPARM_rx_flip; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else # define MODPARM_rx_copy true # define MODPARM_rx_flip false #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define NO_CSUM_OFFLOAD 0 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define NO_CSUM_OFFLOAD 1 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define NO_CSUM_OFFLOAD 1 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) 
\ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static irqreturn_t netif_int(int irq, void *dev_id); static void send_fake_arp(struct net_device *, int arpType); static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include static void remove_nic8139(void) { struct pci_dev *pci_dev_nic8139 = NULL; pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); pci_disable_device(pci_dev_nic8139); #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* end check kernel version */ } return; } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. */ static void netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) pr_warn("Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static bool netfront_nic_unplugged(struct xenbus_device *dev) { bool ret = true; #ifndef CONFIG_XEN char *typestr; /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=nics|all * - native driver required with xen_emul_unplug=ide-disks|never */ /* Use pv driver if emulated hardware was unplugged */ if (xen_pvonhvm_unplugged_nics) return true; typestr = xenbus_read(XBT_NIL, dev->otherend, "type", NULL); /* Assume emulated+pv interface (ioemu+vif) when type property is missing. */ if (IS_ERR(typestr)) return false; /* If the interface is emulated and not unplugged, skip it. */ if (strcmp(typestr, "vif_ioemu") == 0 || strcmp(typestr, "ioemu") == 0) ret = false; kfree(typestr); #endif return ret; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
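 *
 * Added note: in outline, probe removes any emulated RTL8139 NIC,
 * bails out if the emulated interface has not been unplugged (native
 * driver case), creates the net_device, reads the MAC address from
 * xenstore, registers the device, enables arp_notify and adds the
 * sysfs attributes. Ring setup itself happens later, from
 * talk_to_backend().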
*/ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove 8139 nic when xen nic devices appear */ remove_nic8139(); if (!netfront_nic_unplugged(dev)) { pr_warning("netfront: skipping emulated interface, native driver required\n"); return -ENODEV; } netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_percpu(info->stats); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
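 * Added note: the transaction below then advertises the connection
 * details to the backend: tx-ring-ref / rx-ring-ref, event-channel,
 * request-rx-copy and the feature-* keys. On -EAGAIN the whole
 * transaction is simply restarted.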
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-rx-notify", "1"); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-no-csum-offload", __stringify(NO_CSUM_OFFLOAD)); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } #ifdef OPENSUSE_1302 #ifdef NETIF_F_IPV6_CSUM err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1"); if (err) { message = "writing feature-ipv6-csum-offload"; goto abort_transaction; } #else #define NETIF_F_IPV6_CSUM 0 #endif #endif err = xenbus_write(xbt, dev->nodename, "feature-sg", "1"); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv4", __stringify(HAVE_TSO)); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } #ifdef OPENSUSE_1302 #ifdef NETIF_F_TSO6 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } #else #define NETIF_F_TSO6 0 #endif #endif err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, 
netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ /* Added by luwei KF29836, DTS2011041502151 */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; //Modified by luwei KF29836, DTS2011041502151 //creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
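 * (Added note: the read barrier pairs with the backend's update of
 * rsp_prod, so the response entries consumed below are not read before
 * the producer index itself has been observed.)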
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate enough skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
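 * Added note: the fill target adapts. It is doubled (capped at
 * rx_max_target) when the ring came close to running dry here, and
 * shrunk again linearly in netif_poll() when responses arrive in small
 * batches.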
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; page = skb_frag_page(skb_shinfo(skb)->frags); pfn = page_to_pfn(page); vaddr = page_address(page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if (nr_flips) { struct xen_memory_reservation reservation = { .nr_extents = nr_flips, .domid = DOMID_SELF, }; /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ MULTI_memory_op(np->rx_mcl + i, XENMEM_decrease_reservation, &reservation); /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; struct page *page = skb_frag_page(frag); len = skb_frag_size(frag); offset = frag->page_offset; /* Data must not cross a page boundary. 
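 * Added note: a frag may sit in a compound (multi-page) page, so it is
 * walked one page at a time below and a separate tx request plus grant
 * reference is emitted for each page-sized piece.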
*/ BUG_ON(offset + len > (PAGE_SIZE << compound_order(page))); /* Skip unused frames from start of page */ page += PFN_DOWN(offset); for (offset &= ~PAGE_MASK; len; page++, offset = 0) { unsigned int bytes = PAGE_SIZE - offset; if (bytes > len) bytes = len; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; tx->flags = 0; len -= bytes; } } np->tx.req_prod_pvt = prod; } /* * Count how many ring slots are required to send the frags of this * skb. Each frag might be a compound page. */ static unsigned int xennet_count_skb_frag_slots(const struct sk_buff *skb) { unsigned int i, pages, frags = skb_shinfo(skb)->nr_frags; for (pages = i = 0; i < frags; i++) { const skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned int len = skb_frag_size(frag); if (!len) continue; pages += PFN_UP((frag->page_offset & ~PAGE_MASK) + len); } return pages; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn, flags; int notify; unsigned int offset = offset_in_page(data); unsigned int slots, len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } /* * If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. */ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited("xennet: length %u too big for interface\n", skb->len); goto drop; } slots = PFN_UP(offset + len) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { net_alert_ratelimited("xennet: skb rides the rocket: %u slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netfront_carrier_ok(np) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? 
*/ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_is_gso(skb)) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; #ifndef OPENSUSE_1302 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; #else gso->u.gso.type = skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ? XEN_NETIF_GSO_TYPE_TCPV6 : XEN_NETIF_GSO_TYPE_TCPV4; #endif gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; #ifndef OPENSUSE_1302 dev_kfree_skb(skb); #else dev_kfree_skb_any(skb); #endif return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) netdev_warn(np->netdev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) netdev_warn(np->netdev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = 
xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) netdev_warn(np->netdev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) netdev_warn(np->netdev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) netdev_warn(np->netdev, "Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ const struct page *page = skb_frag_page(skb_shinfo(skb)->frags); unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) netdev_warn(np->netdev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) netdev_warn(np->netdev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = np->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); if (shinfo->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(skb_shinfo(nskb)->frags), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) netdev_warn(skb->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. 
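 * (Added note: on OPENSUSE_1302 builds the check below also accepts
 * XEN_NETIF_GSO_TYPE_TCPV6, mirroring the feature-gso-tcpv6 key
 * advertised in talk_to_backend.)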
*/ #ifndef OPENSUSE_1302 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { #else if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { #endif if (net_ratelimit()) netdev_warn(skb->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO #ifndef OPENSUSE_1302 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; #else skb_shinfo(skb)->gso_type = gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; #endif /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) netdev_warn(skb->dev, "GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_shinfo(skb)->frags[0].page_offset = rx->offset; skb_frag_size_set(skb_shinfo(skb)->frags, rx->status); skb->len += skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. 
*/ if (!xen_feature(XENFEAT_auto_translated_physmap)) { MULTI_mmu_update(np->rx_mcl + pages_flipped, np->rx_mmu, pages_flipped, 0, DOMID_SELF); err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); #ifndef OPENSUSE_1302 if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { #else if (xennet_checksum_setup(skb, &np->rx_gso_csum_fixups)) { #endif kfree_skb(skb); dev->stats.rx_errors++; --work_done; continue; } u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct page *page; if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); page = skb_frag_page(skb_shinfo(skb)->frags); if (0 == mfn) { balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. 
*/ unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. */ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, 0, DOMID_SELF); rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl + 1 - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; netfront_accelerator_call_get_stats(np, dev); for_each_possible_cpu(cpu) { struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { #ifdef OPENSUSE_1302 start = u64_stats_fetch_begin_irq(&stats->syncp); #else start = u64_stats_fetch_begin_bh(&stats->syncp); #endif rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; #ifdef OPENSUSE_1302 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); #else } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); #endif tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strlcpy(info->bus_info, dev_name(dev->dev.parent), ARRAY_SIZE(info->bus_info)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. 
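 * Added note: recovery then proceeds in three steps: discard pending
 * tx fragments (their grants are reclaimed in netif_release_tx_bufs),
 * re-grant and requeue every rx buffer we still hold, and finally
 * re-enable the carrier and kick the backend so traffic can resume.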
*/ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { unsigned long pfn; if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); pfn = page_to_pfn(skb_frag_page(skb_shinfo(skb)->frags)); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, pfn); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_drvinfo = netfront_get_drvinfo, .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t 
show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ /* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } #ifdef OPENSUSE_1302 if (features & NETIF_F_IPV6_CSUM) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-ipv6-csum-offload", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_IPV6_CSUM; } if (features & NETIF_F_TSO6) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv6", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO6; } #endif return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { netif_int(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_rx_mode = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, }; static struct net_device *create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = 
rx_refill_timeout; err = -ENOMEM; #ifdef OPENSUSE_1302 np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); #else np->stats = alloc_percpu(struct netfront_stats); #endif if (np->stats == NULL) goto exit; #ifndef OPENSUSE_1302 for_each_possible_cpu(i) u64_stats_init(&per_cpu_ptr(np->stats, i)->syncp); #endif /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit_free_stats; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); #ifdef OPENSUSE_1302 netdev->features = NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; #else netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; #endif /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; #ifdef OPENSUSE_1302 netdev->ethtool_ops = &network_ethtool_ops; #else SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); #endif SET_NETDEV_DEV(netdev, &dev->dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); #endif np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit_free_stats: free_percpu(np->stats); exit: free_netdev(netdev); return ERR_PTR(err); } static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
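 * Carrier is dropped while holding both the rx_lock (bh) and tx_lock (irq)
 * so the data paths observe the disconnect before the event channel is
 * unbound and the shared rings are released below.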
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = netfront_remove, .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, ); static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { pr_warning("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = true; /* Default is to copy. */ #endif netif_init_accel(); pr_info("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.28/netfront.h000066400000000000000000000214141314037446600305600ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct netfront_stats { u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; struct u64_stats_sync syncp; }; /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *dev_stats, struct netfront_stats *link_stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. 
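 * rx_target is the number of buffers the driver tries to keep posted on the
 * RX ring; it is clamped between rx_min_target and rx_max_target and grown
 * by network_alloc_rx_buffers() when the ring risks running dry. The limits
 * are runtime-tunable through the rxbuf_min/rxbuf_max sysfs attributes
 * registered by xennet_sysfs_addif() (typically visible under
 * /sys/class/net/<ifname>/).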
*/ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. */ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ struct netfront_stats __percpu *stats; unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.49/000077500000000000000000000000001314037446600265515ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.49/accel.c000066400000000000000000000535651314037446600300020ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) \ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. 
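 *
 * The list itself is protected by accelerator_mutex; each accelerator's
 * vif_states list is additionally guarded by its vif_states_lock spinlock so
 * that data-path checks of the hooks can be made without taking the mutex.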
*/ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
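 *
 * It may sleep because the data path is quiesced first: napi_disable() and
 * netif_tx_lock_bh() are taken so that no RX polling or transmit is in
 * flight while the hooks pointer is swapped under vif_states_lock.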
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
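 *
 * Version negotiation: a plugin that passes a newer API version than
 * NETFRONT_ACCEL_VERSION gets our supported version returned so it may retry
 * with a compatible one; an older version is rejected with -EPROTO. A purely
 * illustrative (hypothetical) plugin registration might therefore look like:
 *
 *     static struct netfront_accel_hooks my_hooks = { .new_device = ... };
 *     int rc = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
 *                                          "my_accel", &my_hooks);
 *     if (rc > 0)
 *             rc = -EPROTO;   (frontend only supports version rc)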
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats, this_cpu_ptr(vif_state->np->rx_stats), this_cpu_ptr(vif_state->np->tx_stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
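 *
 * For every vif still attached, the plugin's get_stats() hook is given a
 * final chance to run, the per-vif hooks pointer is cleared, and remove() is
 * called; the vif_state entries stay on the accelerator's list so the plugin
 * can be re-attached if it is loaded again later.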
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats, this_cpu_ptr(np->rx_stats), this_cpu_ptr(np->tx_stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats, this_cpu_ptr(np->rx_stats), this_cpu_ptr(np->tx_stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.49/netfront.c000066400000000000000000001713041314037446600305620ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct netfront_cb { unsigned int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static bool MODPARM_rx_copy; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static bool MODPARM_rx_flip; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else # define MODPARM_rx_copy true # define MODPARM_rx_flip false #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define NO_CSUM_OFFLOAD 0 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. */ #define NO_CSUM_OFFLOAD 1 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. 
*/ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define NO_CSUM_OFFLOAD 1 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) 
\ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static irqreturn_t netif_int(int irq, void *dev_id); static void send_fake_arp(struct net_device *, int arpType); static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /** * remove pci device of nic8139 */ #include static void remove_nic8139(void) { struct pci_dev *pci_dev_nic8139 = NULL; pci_dev_nic8139 = pci_get_device(0x10ec, 0x8139, pci_dev_nic8139); if (pci_dev_nic8139) { printk(KERN_INFO "remove_nic8139 : 8139 NIC of subsystem(0x%2x,0x%2x) removed.\n", pci_dev_nic8139->subsystem_vendor, pci_dev_nic8139->subsystem_device); if (atomic_read(&pci_dev_nic8139->enable_cnt) > 0) { pci_disable_device(pci_dev_nic8139); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) /* check kernel version */ pci_remove_bus_device(pci_dev_nic8139); #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) pci_stop_bus_device(pci_dev_nic8139); pci_remove_bus_device(pci_dev_nic8139); #else pci_stop_and_remove_bus_device(pci_dev_nic8139); #endif /* end check kernel version */ } return; } static void netfront_free_netdev(struct net_device *netdev) { struct netfront_info *np = netdev_priv(netdev); free_percpu(np->rx_stats); free_percpu(np->tx_stats); free_netdev(netdev); } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. */ static void netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) pr_warn("Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static bool netfront_nic_unplugged(struct xenbus_device *dev) { bool ret = true; #ifndef CONFIG_XEN char *typestr; /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=nics|all * - native driver required with xen_emul_unplug=ide-disks|never */ /* Use pv driver if emulated hardware was unplugged */ if (xen_pvonhvm_unplugged_nics) return true; typestr = xenbus_read(XBT_NIL, dev->otherend, "type", NULL); /* Assume emulated+pv interface (ioemu+vif) when type property is missing. */ if (IS_ERR(typestr)) return false; /* If the interface is emulated and not unplugged, skip it. */ if (strcmp(typestr, "vif_ioemu") == 0 || strcmp(typestr, "ioemu") == 0) ret = false; kfree(typestr); #endif return ret; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. 
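 *
 * Before creating the netdev the probe also removes any emulated RTL8139 PCI
 * NIC (remove_nic8139()) and returns -ENODEV when the emulated interface has
 * not been unplugged, so that the PV and emulated drivers never both bind to
 * the same virtual NIC.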
*/ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; /* remove 8139 nic when xen nic devices appear */ remove_nic8139(); if (!netfront_nic_unplugged(dev)) { pr_warning("netfront: skipping emulated interface, native driver required\n"); return -ENODEV; } netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: netfront_free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); netfront_free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. 
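 * setup_device() grants the TX/RX shared rings to the backend and binds an
 * interdomain event channel; the resulting ring-refs, event-channel port and
 * feature flags (request-rx-copy, feature-rx-notify, feature-no-csum-offload,
 * feature-sg, feature-gso-tcpv4) are then advertised to the backend in the
 * xenbus transaction below.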
*/ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-rx-notify", "1"); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-no-csum-offload", __stringify(NO_CSUM_OFFLOAD)); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-sg", "1"); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv4", __stringify(HAVE_TSO)); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info *info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. 
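 *
 * InitWait: connect the rings (network_connect()) and switch ourselves to
 * Connected. Connected: send gratuitous ARPs so switches relearn the MAC.
 * Closing/Closed: complete the shutdown via xenbus_frontend_closed().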
*/ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ /* Added by luwei KF29836, DTS2011041502151 */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. */ if (src_ip == 0) return; //Modified by luwei KF29836, DTS2011041502151 //creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate enough skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
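 * The target is doubled, capped at rx_max_target, whenever the number of
 * buffers still outstanding to the backend drops below a quarter of the
 * current target.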
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; page = skb_frag_page(skb_shinfo(skb)->frags); pfn = page_to_pfn(page); vaddr = page_address(page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if (nr_flips) { struct xen_memory_reservation reservation = { .nr_extents = nr_flips, .domid = DOMID_SELF, }; /* Tell the ballon driver what is going on. */ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ MULTI_memory_op(np->rx_mcl + i, XENMEM_decrease_reservation, &reservation); /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; struct page *page = skb_frag_page(frag); len = skb_frag_size(frag); offset = frag->page_offset; /* Data must not cross a page boundary. 
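 * Each tx request carries a single grant reference, and a grant covers
 * exactly one machine page, so a frag that spans pages (possible with
 * compound pages) is split below into one request per page.  The
 * per-frag slot cost therefore matches xennet_count_skb_frag_slots():
 * PFN_UP((page_offset & ~PAGE_MASK) + len); assuming 4 KiB pages, a
 * 6000-byte frag starting 200 bytes into a page needs two slots.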
*/ BUG_ON(offset + len > (PAGE_SIZE << compound_order(page))); /* Skip unused frames from start of page */ page += PFN_DOWN(offset); for (offset &= ~PAGE_MASK; len; page++, offset = 0) { unsigned int bytes = PAGE_SIZE - offset; if (bytes > len) bytes = len; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; tx->flags = 0; len -= bytes; } } np->tx.req_prod_pvt = prod; } /* * Count how many ring slots are required to send the frags of this * skb. Each frag might be a compound page. */ static unsigned int xennet_count_skb_frag_slots(const struct sk_buff *skb) { unsigned int i, pages, frags = skb_shinfo(skb)->nr_frags; for (pages = i = 0; i < frags; i++) { const skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned int len = skb_frag_size(frag); if (!len) continue; pages += PFN_UP((frag->page_offset & ~PAGE_MASK) + len); } return pages; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->tx_stats); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn, flags; int notify; unsigned int offset = offset_in_page(data); unsigned int slots, len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } /* * If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. */ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited("xennet: length %u too big for interface\n", skb->len); goto drop; } slots = PFN_UP(offset + len) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { net_alert_ratelimited("xennet: skb rides the rocket: %u slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netfront_carrier_ok(np) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? 
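 * CHECKSUM_PARTIAL means the stack left the checksum for the device to
 * complete, so the backend is told it is blank but the payload can be
 * trusted; CHECKSUM_UNNECESSARY (the branch below) only asserts that
 * the data has already been validated.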
*/ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_is_gso(skb)) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); u64_stats_update_begin(&stats->syncp); stats->bytes += skb->len; stats->packets++; u64_stats_update_end(&stats->syncp); dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! */ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) netdev_warn(np->netdev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) netdev_warn(np->netdev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & 
XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) netdev_warn(np->netdev, "rx->offset: %#x, size: %d\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) netdev_warn(np->netdev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... */ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) netdev_warn(np->netdev, "Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ const struct page *page = skb_frag_page(skb_shinfo(skb)->frags); unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) netdev_warn(np->netdev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) netdev_warn(np->netdev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = np->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); if (shinfo->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(skb_shinfo(nskb)->frags), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) netdev_warn(skb->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) netdev_warn(skb->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
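 * SKB_GSO_DODGY marks the skb as coming from an untrusted source, so
 * the stack re-validates the headers before segmenting, and gso_segs
 * is left as zero below to force the segment count to be recomputed.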
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) netdev_warn(skb->dev, "GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct netfront_stats *stats = this_cpu_ptr(np->rx_stats); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_shinfo(skb)->frags[0].page_offset = rx->offset; skb_frag_size_set(skb_shinfo(skb)->frags, rx->status); skb->len += skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { MULTI_mmu_update(np->rx_mcl + pages_flipped, np->rx_mmu, pages_flipped, 0, DOMID_SELF); err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { kfree_skb(skb); dev->stats.rx_errors++; --work_done; continue; } u64_stats_update_begin(&stats->syncp); stats->packets++; stats->bytes += skb->len; u64_stats_update_end(&stats->syncp); /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
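 * rx_target was doubled in network_alloc_rx_buffers() when the ring
 * nearly ran dry; here it is trimmed by one, floored at rx_min_target,
 * whenever a poll leaves more than three quarters of the posted
 * requests still unanswered, i.e. the ring is over-provisioned for the
 * current load.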
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct page *page; if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); page = skb_frag_page(skb_shinfo(skb)->frags); if (0 == mfn) { balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
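 * In flip mode the frames recovered above are newly acquired machine
 * pages, so the update_va_mapping and MMU_MACHPHYS_UPDATE entries
 * queued in the loop are submitted here as one batched multicall
 * instead of one hypercall per page.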
*/ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, 0, DOMID_SELF); rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl + 1 - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; netfront_accelerator_call_get_stats(np, dev); for_each_possible_cpu(cpu) { struct netfront_stats *stats = per_cpu_ptr(np->rx_stats, cpu); u64 packets, bytes; unsigned int start; do { start = u64_stats_fetch_begin_bh(&stats->syncp); packets = stats->packets; bytes = stats->bytes; } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); tot->rx_packets += packets; tot->rx_bytes += bytes; stats = per_cpu_ptr(np->tx_stats, cpu); do { start = u64_stats_fetch_begin_bh(&stats->syncp); packets = stats->packets; bytes = stats->bytes; } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); tot->tx_packets += packets; tot->tx_bytes += bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strlcpy(info->bus_info, 
dev_name(dev->dev.parent), ARRAY_SIZE(info->bus_info)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { unsigned long pfn; if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); pfn = page_to_pfn(skb_frag_page(skb_shinfo(skb)->frags)); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, pfn); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
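 * Turning the carrier back on lets the tx/rx paths run again, the
 * event-channel notify prompts the backend to look at the rebuilt
 * rings, and the gc/refill calls below reclaim completed tx slots and
 * top the rx ring back up.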
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_drvinfo = netfront_get_drvinfo, .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ 
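/*
 * Note on the attributes above: rxbuf_min and rxbuf_max bound the
 * adaptive fill target (rx_target) used by network_alloc_rx_buffers(),
 * while rxbuf_cur reports its current value.  Writes are clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET] and require CAP_NET_ADMIN.  A minimal
 * usage sketch, assuming the attributes appear under the net device's
 * sysfs directory and the interface is named eth0:
 *
 *   echo 256 > /sys/class/net/eth0/rxbuf_max
 *   cat /sys/class/net/eth0/rxbuf_cur
 */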
/* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { netif_int(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_rx_mode = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, }; static struct net_device *create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; err = -ENOMEM; np->rx_stats = alloc_percpu(struct netfront_stats); if (np->rx_stats == NULL) goto exit; np->tx_stats = alloc_percpu(struct netfront_stats); if (np->tx_stats == NULL) goto exit; for_each_possible_cpu(i) { u64_stats_init(&per_cpu_ptr(np->rx_stats, i)->syncp); u64_stats_init(&per_cpu_ptr(np->tx_stats, i)->syncp); } /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
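 * A free tx_skbs entry holds the index of the next free entry cast to
 * a pointer; such values are below PAGE_OFFSET and so can never be
 * mistaken for a real skb pointer, which is what the freelist helpers
 * and netif_release_tx_bufs() rely on.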
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); netdev->features = NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); #endif np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: netfront_free_netdev(netdev); return ERR_PTR(err); } static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = netfront_remove, .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, ); static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { pr_warning("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = true; /* Default is to copy. 
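 * Copying is the preferred default: page flipping trades pages through
 * the balloon driver on every receive, so it is normally only worth
 * selecting explicitly for backends that cannot copy.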
*/ #endif netif_init_accel(); pr_info("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/3.12.49/netfront.h000066400000000000000000000214631314037446600305670ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct netfront_stats { u64 packets; u64 bytes; struct u64_stats_sync syncp; }; /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. 
Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *dev_stats, struct netfront_stats *rx_stats, struct netfront_stats *tx_stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. 
*/ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/Kbuild000066400000000000000000000001411314037446600270640ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-vnif.o xen-vnif-objs := netfront.o xen-vnif-objs += accel.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/Makefile000066400000000000000000000000661314037446600273750ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/accel.c000066400000000000000000000533571314037446600271630ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include "netfront.h" #define DPRINTK(fmt, args...) 
\ pr_debug("netfront/accel (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev); static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend); static void netfront_accelerator_remove_watch(struct netfront_info *np); /* * List of all netfront accelerator plugin modules available. Each * list entry is of type struct netfront_accelerator. */ static struct list_head accelerators_list; /* Workqueue to process acceleration configuration changes */ struct workqueue_struct *accel_watch_workqueue; /* Mutex to prevent concurrent loads and suspends, etc. */ DEFINE_MUTEX(accelerator_mutex); void netif_init_accel(void) { INIT_LIST_HEAD(&accelerators_list); accel_watch_workqueue = create_workqueue("net_accel"); } void netif_exit_accel(void) { struct netfront_accelerator *accelerator, *tmp; flush_workqueue(accel_watch_workqueue); destroy_workqueue(accel_watch_workqueue); /* No lock required as everything else should be quiet by now */ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) { BUG_ON(!list_empty(&accelerator->vif_states)); list_del(&accelerator->link); kfree(accelerator->frontend); kfree(accelerator); } } /* * Watch the configured accelerator and change plugin if it's modified */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) static void accel_watch_work(struct work_struct *context) #else static void accel_watch_work(void *context) #endif { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) struct netfront_accel_vif_state *vif_state = container_of(context, struct netfront_accel_vif_state, accel_work); #else struct netfront_accel_vif_state *vif_state = (struct netfront_accel_vif_state *)context; #endif struct netfront_info *np = vif_state->np; char *accel_frontend; int accel_len, rc = -1; mutex_lock(&accelerator_mutex); accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend, "accel-frontend", &accel_len); if (IS_ERR(accel_frontend)) { accel_frontend = NULL; netfront_remove_accelerator(np, np->xbdev); } else { /* If this is the first time, request the accelerator, otherwise only request one if it has changed */ if (vif_state->accel_frontend == NULL) { rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } else { if (strncmp(vif_state->accel_frontend, accel_frontend, accel_len)) { netfront_remove_accelerator(np, np->xbdev); rc = netfront_load_accelerator(np, np->xbdev, accel_frontend); } } } /* Get rid of previous state and replace with the new name */ if (vif_state->accel_frontend != NULL) kfree(vif_state->accel_frontend); vif_state->accel_frontend = accel_frontend; mutex_unlock(&accelerator_mutex); if (rc == 0) { DPRINTK("requesting module %s\n", accel_frontend); request_module("%s", accel_frontend); /* * Module should now call netfront_accelerator_loaded() once * it's up and running, and we can continue from there */ } } static void accel_watch_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct netfront_accel_vif_state *vif_state = container_of(watch, struct netfront_accel_vif_state, accel_watch); queue_work(accel_watch_workqueue, &vif_state->accel_work); } void netfront_accelerator_add_watch(struct netfront_info *np) { int err; /* * If old watch exists, e.g. 
from before suspend/resume, * remove it now */ netfront_accelerator_remove_watch(np); /* Get a watch on the accelerator plugin */ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend, "accel-frontend", &np->accel_vif_state.accel_watch, accel_watch_changed); if (err) { DPRINTK("%s: Failed to register accel watch: %d\n", __FUNCTION__, err); np->accel_vif_state.accel_watch.node = NULL; } } static void netfront_accelerator_purge_watch(struct netfront_accel_vif_state *vif_state) { flush_workqueue(accel_watch_workqueue); /* Clean up any state left from watch */ if (vif_state->accel_frontend != NULL) { kfree(vif_state->accel_frontend); vif_state->accel_frontend = NULL; } } static void netfront_accelerator_remove_watch(struct netfront_info *np) { struct netfront_accel_vif_state *vif_state = &np->accel_vif_state; /* Get rid of watch on accelerator plugin */ if (vif_state->accel_watch.node != NULL) { unregister_xenbus_watch(&vif_state->accel_watch); kfree(vif_state->accel_watch.node); vif_state->accel_watch.node = NULL; netfront_accelerator_purge_watch(vif_state); } } /* * Initialise the accel_vif_state field in the netfront state */ void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev) { np->accelerator = NULL; /* It's assumed that these things don't change */ np->accel_vif_state.np = np; np->accel_vif_state.dev = dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work); #else INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work, &np->accel_vif_state); #endif } /* * Compare a frontend description string against an accelerator to see * if they match. Would ultimately be nice to replace the string with * a unique numeric identifier for each accelerator. */ static int match_accelerator(const char *frontend, struct netfront_accelerator *accelerator) { return strcmp(frontend, accelerator->frontend) == 0; } /* * Add a frontend vif to the list of vifs that is using a netfront * accelerator plugin module. Must be called with the accelerator * mutex held. */ static void add_accelerator_vif(struct netfront_accelerator *accelerator, struct netfront_info *np) { if (np->accelerator == NULL) { np->accelerator = accelerator; list_add(&np->accel_vif_state.link, &accelerator->vif_states); } else { /* * May get here legitimately if suspend_cancel is * called, but in that case configuration should not * have changed */ BUG_ON(np->accelerator != accelerator); } } /* * Initialise the state to track an accelerator plugin module. * * Must be called with the accelerator mutex held. */ static int init_accelerator(const char *frontend, struct netfront_accelerator **result, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator = kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL); int frontend_len; if (!accelerator) { DPRINTK("no memory for accelerator\n"); return -ENOMEM; } frontend_len = strlen(frontend) + 1; accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL); if (!accelerator->frontend) { DPRINTK("no memory for accelerator\n"); kfree(accelerator); return -ENOMEM; } strlcpy(accelerator->frontend, frontend, frontend_len); INIT_LIST_HEAD(&accelerator->vif_states); spin_lock_init(&accelerator->vif_states_lock); accelerator->hooks = hooks; list_add(&accelerator->link, &accelerators_list); *result = accelerator; return 0; } /* * Modify the hooks stored in the per-vif state to match that in the * netfront accelerator's state. * * Takes the vif_states_lock spinlock and may sleep. 
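 * The napi_disable()/netif_tx_lock_bh() pair below quiesces both the
 * receive and transmit paths, so the hooks pointer is never swapped
 * while a data-path call through it is still in flight.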
*/ static void accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state) { struct netfront_accelerator *accelerator; unsigned long flags; DPRINTK("%p\n",vif_state); /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); accelerator = vif_state->np->accelerator; spin_lock_irqsave(&accelerator->vif_states_lock, flags); vif_state->hooks = accelerator->hooks; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_new_vif(struct netfront_info *np, struct xenbus_device *dev, struct netfront_accelerator *accelerator) { struct netfront_accel_hooks *hooks; DPRINTK("\n"); /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); hooks = accelerator->hooks; if (hooks && hooks->new_device(np->netdev, dev) == 0) accelerator_set_vif_state_hooks(&np->accel_vif_state); return; } /* * Request that a particular netfront accelerator plugin is loaded. * Usually called as a result of the vif configuration specifying * which one to use. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static int netfront_load_accelerator(struct netfront_info *np, struct xenbus_device *dev, const char *frontend) { struct netfront_accelerator *accelerator; int rc = 0; DPRINTK(" %s\n", frontend); /* * Look at list of loaded accelerators to see if the requested * one is already there */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_new_vif(np, dev, accelerator); return 0; } } /* Couldn't find it, so create a new one and load the module */ if ((rc = init_accelerator(frontend, &accelerator, NULL)) < 0) { return rc; } /* Include this frontend device on the accelerator's list */ add_accelerator_vif(accelerator, np); return rc; } /* * Go through all the netfront vifs and see if they have requested * this accelerator. Notify the accelerator plugin of the relevant * device if so. Called when an accelerator plugin module is first * loaded and connects to netfront. * * Must be called with accelerator_mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_probe_vifs(struct netfront_accelerator *accelerator, struct netfront_accel_hooks *hooks) { struct netfront_accel_vif_state *vif_state, *tmp; DPRINTK("%p\n", accelerator); /* * Store the hooks for future calls to probe a new device, and * to wire into the vif_state once the accelerator plugin is * ready to accelerate each vif */ BUG_ON(hooks == NULL); accelerator->hooks = hooks; /* Holds accelerator_mutex to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { struct netfront_info *np = vif_state->np; if (hooks->new_device(np->netdev, vif_state->dev) == 0) accelerator_set_vif_state_hooks(vif_state); } } /* * Called by the netfront accelerator plugin module when it has * loaded. * * Takes the accelerator_mutex and vif_states_lock spinlock. 
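 * Version handling: a caller with a newer API gets our supported
 * NETFRONT_ACCEL_VERSION back so it may retry at that level, while an
 * older caller is rejected with -EPROTO.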
*/ int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks) { struct netfront_accelerator *accelerator; if (is_initial_xendomain()) return -EINVAL; if (version != NETFRONT_ACCEL_VERSION) { if (version > NETFRONT_ACCEL_VERSION) { /* Caller has higher version number, leave it up to them to decide whether to continue. They can re-call with a lower number if they're happy to be compatible with us */ return NETFRONT_ACCEL_VERSION; } else { /* We have a more recent version than caller. Currently reject, but may in future be able to be backwardly compatible */ return -EPROTO; } } mutex_lock(&accelerator_mutex); /* * Look through list of accelerators to see if it has already * been requested */ list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_probe_vifs(accelerator, hooks); goto out; } } /* * If it wasn't in the list, add it now so that when it is * requested the caller will find it */ DPRINTK("Couldn't find matching accelerator (%s)\n", frontend); init_accelerator(frontend, &accelerator, hooks); out: mutex_unlock(&accelerator_mutex); return 0; } EXPORT_SYMBOL_GPL(netfront_accelerator_loaded); /* * Remove the hooks from a single vif state. * * Takes the vif_states_lock spinlock and may sleep. */ static void accelerator_remove_single_hook(struct netfront_accelerator *accelerator, struct netfront_accel_vif_state *vif_state) { unsigned long flags; /* Make sure there are no data path operations going on */ napi_disable(&vif_state->np->napi); netif_tx_lock_bh(vif_state->np->netdev); spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* * Remove the hooks, but leave the vif_state on the * accelerator's list as that signifies this vif is * interested in using that accelerator if it becomes * available again */ vif_state->hooks = NULL; spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); netif_tx_unlock_bh(vif_state->np->netdev); napi_enable(&vif_state->np->napi); } /* * Safely remove the accelerator function hooks from a netfront state. * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static void accelerator_remove_hooks(struct netfront_accelerator *accelerator) { struct netfront_accel_vif_state *vif_state, *tmp; unsigned long flags; /* Mutex is held to iterate list */ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states, link) { if(vif_state->hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ vif_state->hooks->get_stats(vif_state->np->netdev, &vif_state->np->netdev->stats, this_cpu_ptr(vif_state->np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); accelerator_remove_single_hook(accelerator, vif_state); accelerator->hooks->remove(vif_state->dev); } } accelerator->hooks = NULL; } /* * Called by a netfront accelerator when it is unloaded. This safely * removes the hooks into the plugin and blocks until all devices have * finished using it, so on return it is safe to unload. * * Takes the accelerator mutex, and vif_states_lock spinlock. 
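 * Each vif keeps its entry on the accelerator's vif_states list even
 * though its hooks are cleared, so it can be re-attached if the same
 * plugin is loaded again later.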
*/ void netfront_accelerator_stop(const char *frontend) { struct netfront_accelerator *accelerator; mutex_lock(&accelerator_mutex); list_for_each_entry(accelerator, &accelerators_list, link) { if (match_accelerator(frontend, accelerator)) { accelerator_remove_hooks(accelerator); goto out; } } out: mutex_unlock(&accelerator_mutex); } EXPORT_SYMBOL_GPL(netfront_accelerator_stop); /* * Helper for call_remove and do_suspend * * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock. */ static int do_remove(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator = np->accelerator; unsigned long flags; int rc = 0; if (np->accel_vif_state.hooks) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); /* Last chance to get statistics from the accelerator */ np->accel_vif_state.hooks->get_stats(np->netdev, &np->netdev->stats, this_cpu_ptr(np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); /* * Try and do the opposite of accelerator_probe_new_vif * to ensure there's no state pointing back at the * netdev */ accelerator_remove_single_hook(accelerator, &np->accel_vif_state); rc = accelerator->hooks->remove(dev); } return rc; } /* * Must be called with the accelerator mutex held. Takes the * vif_states_lock spinlock */ static int netfront_remove_accelerator(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accelerator *accelerator; struct netfront_accel_vif_state *tmp_vif_state; int rc = 0; /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) return rc; accelerator = np->accelerator; list_for_each_entry(tmp_vif_state, &accelerator->vif_states, link) { if (tmp_vif_state == &np->accel_vif_state) { list_del(&np->accel_vif_state.link); break; } } rc = do_remove(np, dev); np->accelerator = NULL; return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev) { int rc; netfront_accelerator_remove_watch(np); mutex_lock(&accelerator_mutex); rc = netfront_remove_accelerator(np, dev); mutex_unlock(&accelerator_mutex); return rc; } /* * No lock pre-requisites. Takes the accelerator mutex and the * vif_states_lock spinlock. */ int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev) { int rc = 0; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if (np->accelerator == NULL) goto out; /* * Call the remove accelerator hook, but leave the vif_state * on the accelerator's list in case there is a suspend_cancel. */ rc = do_remove(np, dev); out: mutex_unlock(&accelerator_mutex); return rc; } int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev) { netfront_accelerator_purge_watch(&np->accel_vif_state); /* * Gratuitously fire the watch handler to reinstate the * configured accelerator */ if (dev->state == XenbusStateConnected) queue_work(accel_watch_workqueue, &np->accel_vif_state.accel_work); return 0; } /* * No lock pre-requisites. 
Takes the accelerator mutex */ void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev) { struct netfront_accel_vif_state *accel_vif_state = NULL; mutex_lock(&accelerator_mutex); /* Check that we've got a device that was accelerated */ if(np->accelerator == NULL) goto out; /* Find the vif_state from the accelerator's list */ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states, link) { if (accel_vif_state->dev == dev) { BUG_ON(accel_vif_state != &np->accel_vif_state); /* * Remove it from the accelerator's list so * state is consistent for probing new vifs * when they get connected */ list_del(&accel_vif_state->link); np->accelerator = NULL; break; } } out: mutex_unlock(&accelerator_mutex); return; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np) { struct netfront_accelerator *accelerator; int rc = 1; unsigned long flags; accelerator = np->accelerator; /* Call the check_ready accelerator hook. */ if (np->accel_vif_state.hooks && accelerator) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->check_ready(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; accelerator = np->accelerator; /* Call the stop_napi_interrupts accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) np->accel_vif_state.hooks->stop_napi_irq(dev); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } } /* * No lock pre-requisites. Takes the vif_states_lock spinlock */ int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev) { struct netfront_accelerator *accelerator; unsigned long flags; int rc = 0; accelerator = np->accelerator; /* Call the get_stats accelerator hook. */ if (np->accel_vif_state.hooks && accelerator != NULL) { spin_lock_irqsave(&accelerator->vif_states_lock, flags); if (np->accel_vif_state.hooks && np->accelerator == accelerator) rc = np->accel_vif_state.hooks->get_stats(dev, &dev->stats, this_cpu_ptr(np->stats)); spin_unlock_irqrestore(&accelerator->vif_states_lock, flags); } return rc; } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/netfront.c000066400000000000000000001732071314037446600277500ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef OPENSUSE_1302 #include #endif #include #include #include #include #include #include #include struct netfront_cb { unsigned int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #include "netfront.h" /* not support balloon for openSUSE 13.2*/ #ifdef OPENSUSE_1302 void balloon_update_driver_allowance(long delta) { /* nothing */ } void balloon_release_driver_page(struct page *page) { /* nothing */ } #endif /* * Mutually-exclusive module options to select receive data path: * rx_copy : Packets are copied by network backend into local memory * rx_flip : Page containing packet data is transferred to our ownership * For fully-virtualised guests there is no option - copying must be used. * For paravirtualised guests, flipping is the default. */ #ifdef CONFIG_XEN static bool MODPARM_rx_copy; module_param_named(rx_copy, MODPARM_rx_copy, bool, 0); MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)"); static bool MODPARM_rx_flip; module_param_named(rx_flip, MODPARM_rx_flip, bool, 0); MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)"); #else # define MODPARM_rx_copy true # define MODPARM_rx_flip false #endif #define RX_COPY_THRESHOLD 256 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 /* If we don't have GSO, fake things up so that we never try to use it. */ #if defined(NETIF_F_GSO) #define HAVE_GSO 1 #define HAVE_TSO 1 /* TSO is a subset of GSO */ #define NO_CSUM_OFFLOAD 0 static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all GSO bits except ROBUST. */ dev->features &= ~NETIF_F_GSO_MASK; dev->features |= NETIF_F_GSO_ROBUST; } #elif defined(NETIF_F_TSO) #define HAVE_GSO 0 #define HAVE_TSO 1 /* Some older kernels cannot cope with incorrect checksums, * particularly in netfilter. I'm not sure there is 100% correlation * with the presence of NETIF_F_TSO but it appears to be a good first * approximiation. 
*/ #define NO_CSUM_OFFLOAD 1 #define gso_size tso_size #define gso_segs tso_segs static inline void dev_disable_gso_features(struct net_device *dev) { /* Turn off all TSO bits. */ dev->features &= ~NETIF_F_TSO; } static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->tso_size; } static inline int skb_gso_ok(struct sk_buff *skb, int features) { return (features & NETIF_F_TSO); } #define netif_skb_features(skb) ((skb)->dev->features) static inline int netif_needs_gso(struct sk_buff *skb, int features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } #else #define HAVE_GSO 0 #define HAVE_TSO 0 #define NO_CSUM_OFFLOAD 1 #define netif_needs_gso(skb, feat) 0 #define dev_disable_gso_features(dev) ((void)0) #define ethtool_op_set_tso(dev, data) (-ENOSYS) #endif struct netfront_rx_info { struct netif_rx_response rx; struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; /* * Implement our own carrier flag: the network stack's version causes delays * when the carrier is re-enabled (in particular, dev_activate() may not * immediately be called, which can cause packet loss). */ #define netfront_carrier_on(netif) ((netif)->carrier = 1) #define netfront_carrier_off(netif) ((netif)->carrier = 0) #define netfront_carrier_ok(netif) ((netif)->carrier) /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id) { list[id] = list[0]; list[0] = (void *)(unsigned long)id; } static inline unsigned short get_id_from_freelist(struct sk_buff **list) { unsigned int id = (unsigned int)(unsigned long)list[0]; list[0] = list[id]; return id; } static inline int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #define DPRINTK(fmt, args...) \ pr_debug("netfront (%s:%d) " fmt, \ __FUNCTION__, __LINE__, ##args) static int setup_device(struct xenbus_device *, struct netfront_info *); static struct net_device *create_netdev(struct xenbus_device *); static void end_access(int, void *); static void netif_release_rings(struct netfront_info *); static void netif_disconnect_backend(struct netfront_info *); static int network_connect(struct net_device *); static void network_tx_buf_gc(struct net_device *); static void network_alloc_rx_buffers(struct net_device *); static irqreturn_t netif_int(int irq, void *dev_id); static void send_fake_arp(struct net_device *, int arpType); static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while(0) #endif static inline bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } /* * Work around net.ipv4.conf.*.arp_notify not being enabled by default. 
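 *
 * The effect is the same as an administrator enabling the sysctl by
 * hand, e.g. (interface name assumed):
 *
 *	sysctl -w net.ipv4.conf.eth0.arp_notify=1
 *
 * so that a gratuitous ARP is emitted when the device comes up or its
 * hardware address changes (useful after migration).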
*/ static void netfront_enable_arp_notify(struct netfront_info *info) { #ifdef CONFIG_INET struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(info->netdev); if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY)) IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1); rtnl_unlock(); if (!in_dev) pr_warn("Cannot enable ARP notification on %s\n", info->xbdev->nodename); #endif } static bool netfront_nic_unplugged(struct xenbus_device *dev) { bool ret = true; #ifndef CONFIG_XEN char *typestr; /* * For HVM guests: * - pv driver required if booted with xen_emul_unplug=nics|all * - native driver required with xen_emul_unplug=ide-disks|never */ /* Use pv driver if emulated hardware was unplugged */ if (xen_pvonhvm_unplugged_nics) return true; typestr = xenbus_read(XBT_NIL, dev->otherend, "type", NULL); /* Assume emulated+pv interface (ioemu+vif) when type property is missing. */ if (IS_ERR(typestr)) return false; /* If the interface is emulated and not unplugged, skip it. */ if (strcmp(typestr, "vif_ioemu") == 0 || strcmp(typestr, "ioemu") == 0) ret = false; kfree(typestr); #endif return ret; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; if (!netfront_nic_unplugged(dev)) { pr_warning("netfront: skipping emulated interface, native driver required\n"); return -ENODEV; } netdev = create_netdev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { pr_warning("%s: register_netdev err=%d\n", __FUNCTION__, err); goto fail; } netfront_enable_arp_notify(info); err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warning("%s: add sysfs failed err=%d\n", __FUNCTION__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static int netfront_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); del_timer_sync(&info->rx_refill_timer); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); free_percpu(info->stats); free_netdev(info->netdev); return 0; } static int netfront_suspend(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend(info, dev); } static int netfront_suspend_cancel(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); return netfront_accelerator_suspend_cancel(info, dev); } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
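 *
 * In outline, as implemented below and in the accelerator helpers
 * above: suspend calls the accelerator's remove hook but keeps the
 * vif_state listed so that a suspend_cancel can simply re-fire the
 * accelerator watch; a real resume drops the vif_state, disconnects
 * the backend rings and lets backend_changed()/network_connect()
 * rebuild them.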
*/ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s\n", dev->nodename); netfront_accelerator_resume(info, dev); netif_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Read mac only in the first setup. */ if (!is_valid_ether_addr(info->mac)) { err = xen_net_read_mac(dev, info->mac); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } } /* Create shared ring, alloc event channel. */ err = setup_device(dev, info); if (err) goto out; /* This will load an accelerator if one is configured when the * watch fires */ netfront_accelerator_add_watch(info); again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", irq_to_evtchn_port(info->irq)); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", info->copying_receiver); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-rx-notify", "1"); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-no-csum-offload", __stringify(NO_CSUM_OFFLOAD)); if (err) { message = "writing feature-no-csum-offload"; goto abort_transaction; } #ifdef OPENSUSE_1302 #ifdef NETIF_F_IPV6_CSUM err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1"); if (err) { message = "writing feature-ipv6-csum-offload"; goto abort_transaction; } #else #define NETIF_F_IPV6_CSUM 0 #endif #endif err = xenbus_write(xbt, dev->nodename, "feature-sg", "1"); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv4", __stringify(HAVE_TSO)); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } #ifdef OPENSUSE_1302 #ifdef NETIF_F_TSO6 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } #else #define NETIF_F_TSO6 0 #endif #endif err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: netfront_accelerator_call_remove(info, dev); netif_disconnect_backend(info); out: return err; } static int setup_device(struct xenbus_device *dev, struct netfront_info 
*info) { struct netif_tx_sring *txs; struct netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; info->irq = 0; txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; memcpy(netdev->dev_addr, info->mac, ETH_ALEN); err = bind_listening_port_to_irqhandler( dev->otherend_id, netif_int, 0, netdev->name, netdev); if (err < 0) goto fail; info->irq = err; return 0; fail: netif_release_rings(info); return err; } /** * Callback received when the backend's state changes. */ static void backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; DPRINTK("%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (network_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: (void)send_fake_arp(netdev, ARPOP_REQUEST); (void)send_fake_arp(netdev, ARPOP_REPLY); netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } /** Send a packet on a net device to encourage switches to learn the * MAC. We send a fake ARP request. * * @param dev device * @return 0 on success, error code otherwise */ static void send_fake_arp(struct net_device *dev, int arpType) { #ifdef CONFIG_INET struct sk_buff *skb; u32 src_ip, dst_ip; dst_ip = INADDR_BROADCAST; src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); /* No IP? Then nothing to do. 
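 *
 * (inet_select_addr() above returns 0 when the device has no suitable
 * IPv4 source address, in which case there is nothing worth
 * announcing.)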
*/ if (src_ip == 0) return; // creat GARP if ( ARPOP_REQUEST == arpType ) { skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ NULL); } else { skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip, /*dst_hw*/ NULL, /*src_hw*/ NULL, /*target_hw*/ dev->dev_addr); } if (skb == NULL) return; dev_queue_xmit(skb); #endif } static inline int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static inline void network_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)) && netfront_check_accelerator_queue_ready(dev, np)) netif_wake_queue(dev); } int netfront_check_queue_ready(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); return unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev)); } EXPORT_SYMBOL(netfront_check_queue_ready); static int network_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netfront_carrier_ok(np)) { network_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){ netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void network_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netfront_carrier_ok(np)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id]; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { pr_alert("network_tx_buf_gc: grant still" " in use by backend domain\n"); BUG(); } gnttab_end_foreign_access_ref(np->grant_tx_ref[id]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. 
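 *
 * Worked example (values assumed): with prod == 10 and
 * req_prod == 18 the statement below sets rsp_event to
 * 10 + ((18 - 10) >> 1) + 1 == 15, i.e. we ask to be notified again
 * once roughly half of the 8 outstanding requests have completed.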
*/ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); network_maybe_wake_tx(dev); } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; int nr_flips; netif_rx_request_t *req; if (unlikely(!netfront_carrier_ok(np))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { /* * Allocate an skb and a page. Do not use __dev_alloc_skb as * that will allocate page-sized buffers which is not * necessary here. * 16 bytes added as necessary headroom for netif_receive_skb. */ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate enough skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */ skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (nr_flips = i = 0; ; i++) { if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; page = skb_frag_page(skb_shinfo(skb)->frags); pfn = page_to_pfn(page); vaddr = page_address(page); req = RING_GET_REQUEST(&np->rx, req_prod + i); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref(ref, np->xbdev->otherend_id, pfn); np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remove this page before passing * back to Xen. */ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); MULTI_update_va_mapping(np->rx_mcl+i, (unsigned long)vaddr, __pte(0), 0); } nr_flips++; } else { gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->id = id; req->gref = ref; } if (nr_flips) { struct xen_memory_reservation reservation = { .nr_extents = nr_flips, .domid = DOMID_SELF, }; /* Tell the ballon driver what is going on. 
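 *
 * In flipping mode the i pages queued above are about to be handed to
 * the backend, so the balloon driver is told they are leaving this
 * domain; the matching negative adjustment is made in netif_poll()
 * once flipped pages come back with received packets.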
*/ balloon_update_driver_allowance(i); set_xen_guest_handle(reservation.extent_start, np->rx_pfn_array); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* After all PTEs have been zapped, flush the TLB. */ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; /* Give away a batch of pages. */ MULTI_memory_op(np->rx_mcl + i, XENMEM_decrease_reservation, &reservation); /* Zap PTEs and give away pages in one big * multicall. */ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1))) BUG(); /* Check return status of HYPERVISOR_memory_op(). */ if (unlikely(np->rx_mcl[i].result != i)) panic("Unable to reduce memory reservation\n"); while (nr_flips--) BUG_ON(np->rx_mcl[nr_flips].result); } else { if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != i) panic("Unable to reduce memory reservation\n"); } } else { wmb(); } /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->irq); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; struct page *page = skb_frag_page(frag); len = skb_frag_size(frag); offset = frag->page_offset; /* Data must not cross a page boundary. */ BUG_ON(offset + len > (PAGE_SIZE << compound_order(page))); /* Skip unused frames from start of page */ page += PFN_DOWN(offset); for (offset &= ~PAGE_MASK; len; page++, offset = 0) { unsigned int bytes = PAGE_SIZE - offset; if (bytes > len) bytes = len; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; tx->flags = 0; len -= bytes; } } np->tx.req_prod_pvt = prod; } /* * Count how many ring slots are required to send the frags of this * skb. Each frag might be a compound page. 
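 *
 * Worked example (numbers assumed, 4096-byte pages): a frag with
 * page_offset == 3000 and size == 6000 covers
 * PFN_UP((3000 & ~PAGE_MASK) + 6000) == PFN_UP(9000) == 3 ring slots.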
*/ static unsigned int xennet_count_skb_frag_slots(const struct sk_buff *skb) { unsigned int i, pages, frags = skb_shinfo(skb)->nr_frags; for (pages = i = 0; i < frags; i++) { const skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned int len = skb_frag_size(frag); if (!len) continue; pages += PFN_UP((frag->page_offset & ~PAGE_MASK) + len); } return pages; } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct netif_tx_request *tx; struct netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn, flags; int notify; unsigned int offset = offset_in_page(data); unsigned int slots, len = skb_headlen(skb); /* Check the fast path, if hooks are available */ if (np->accel_vif_state.hooks && np->accel_vif_state.hooks->start_xmit(skb, dev)) { /* Fast path has sent this packet */ return NETDEV_TX_OK; } /* * If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. */ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited("xennet: length %u too big for interface\n", skb->len); goto drop; } slots = PFN_UP(offset + len) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { net_alert_ratelimited("xennet: skb rides the rocket: %u slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netfront_carrier_ok(np) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(np->tx_skbs); np->tx_skbs[id] = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GTF_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; extra = NULL; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) tx->flags |= XEN_NETTXF_data_validated; #if HAVE_TSO if (skb_is_gso(skb)) { struct netif_extra_info *gso = (struct netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; #ifndef OPENSUSE_1302 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; #else gso->u.gso.type = skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ? XEN_NETIF_GSO_TYPE_TCPV6 : XEN_NETIF_GSO_TYPE_TCPV4; #endif gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } #endif np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->irq); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); dev->trans_start = jiffies; /* Note: It is not safe to access skb after network_tx_buf_gc()! 
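 *
 * network_tx_buf_gc() may already find the backend's response for
 * this packet and free the skb via dev_kfree_skb_irq(), so only dev
 * and np may be touched after the call below.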
*/ network_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; #ifndef OPENSUSE_1302 dev_kfree_skb(skb); #else dev_kfree_skb_any(skb); #endif return NETDEV_TX_OK; } static irqreturn_t netif_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netfront_carrier_ok(np))) { network_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) { netfront_accelerator_call_stop_napi_irq(np, dev); napi_schedule(&np->napi); } } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct netif_extra_info *extras, RING_IDX rp) { struct netif_extra_info *extra; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) netdev_warn(np->netdev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) netdev_warn(np->netdev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list, int *pages_flipped_p) { int pages_flipped = *pages_flipped_p; struct mmu_update *mmu; struct multicall_entry *mcl; struct netif_rx_response *rx = &rinfo->rx; struct netif_extra_info *extras = rinfo->extras; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { unsigned long mfn; if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) netdev_warn(np->netdev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) netdev_warn(np->netdev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } if (!np->copying_receiver) { /* Memory pressure, insufficient buffer * headroom, ... 
*/ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { if (net_ratelimit()) netdev_warn(np->netdev, "Unfulfilled rx req (id=%d, st=%d).\n", rx->id, rx->status); xennet_move_rx_slot(np, skb, ref); err = -ENOMEM; goto next; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ const struct page *page = skb_frag_page(skb_shinfo(skb)->frags); unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); mcl = np->rx_mcl + pages_flipped; mmu = np->rx_mmu + pages_flipped; MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; set_phys_to_machine(pfn, mfn); } pages_flipped++; } else { ret = gnttab_end_foreign_access_ref(ref); BUG_ON(!ret); } gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) netdev_warn(np->netdev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) netdev_warn(np->netdev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; *pages_flipped_p = pages_flipped; return err; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = np->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); if (shinfo->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(skb_shinfo(nskb)->frags), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } return cons; } static int xennet_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) netdev_warn(skb->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ #ifndef OPENSUSE_1302 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { #else if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { #endif if (net_ratelimit()) netdev_warn(skb->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } #if HAVE_TSO skb_shinfo(skb)->gso_size = gso->u.gso.size; #if HAVE_GSO #ifndef OPENSUSE_1302 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; #else skb_shinfo(skb)->gso_type = gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; #endif /* Header must be checked, and gso_segs computed. 
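 *
 * SKB_GSO_DODGY marks the GSO metadata as coming from an untrusted
 * source, so the stack re-validates the headers and recomputes
 * gso_segs before the skb is segmented or handed to real hardware.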
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; #endif skb_shinfo(skb)->gso_segs = 0; return 0; #else if (net_ratelimit()) netdev_warn(skb->dev, "GSO unsupported by this kernel.\n"); return -EINVAL; #endif } static int netif_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct netif_rx_response *rx = &rinfo.rx; struct netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done, more_to_do = 1, accel_more_to_do = 1; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; int pages_flipped = 0; int err; spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */ if (unlikely(!netfront_carrier_ok(np))) { spin_unlock(&np->rx_lock); return 0; } skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq, &pages_flipped); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_shinfo(skb)->frags[0].page_offset = rx->offset; skb_frag_size_set(skb_shinfo(skb)->frags, rx->status); skb->len += skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } if (pages_flipped) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-pages_flipped); /* Do all the remapping work and M2P updates. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { MULTI_mmu_update(np->rx_mcl + pages_flipped, np->rx_mmu, pages_flipped, 0, DOMID_SELF); err = HYPERVISOR_multicall_check(np->rx_mcl, pages_flipped + 1, NULL); BUG_ON(err); } } __skb_queue_purge(&errq); while ((skb = __skb_dequeue(&rxq)) != NULL) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); #ifndef OPENSUSE_1302 if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) { #else if (xennet_checksum_setup(skb, &np->rx_gso_csum_fixups)) { #endif kfree_skb(skb); dev->stats.rx_errors++; --work_done; continue; } u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); /* Pass it up. */ netif_receive_skb(skb); } /* If we get a callback with very few responses, reduce fill target. */ /* NB. 
Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; network_alloc_rx_buffers(dev); if (work_done < budget) { /* there's some spare capacity, try the accelerated path */ int accel_budget = budget - work_done; int accel_budget_start = accel_budget; if (np->accel_vif_state.hooks) { accel_more_to_do = np->accel_vif_state.hooks->netdev_poll (dev, &accel_budget); work_done += (accel_budget_start - accel_budget); } else accel_more_to_do = 0; } if (work_done < budget) { local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do && !accel_more_to_do && np->accel_vif_state.hooks) { /* * Slow path has nothing more to do, see if * fast path is likewise */ accel_more_to_do = np->accel_vif_state.hooks->start_napi_irq(dev); } if (!more_to_do && !accel_more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static void netif_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 1; i <= NET_TX_RING_SIZE; i++) { if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET) continue; skb = np->tx_skbs[i]; gnttab_end_foreign_access_ref(np->grant_tx_ref[i]); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void netif_release_rx_bufs_flip(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref, rc; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct page *page; if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, id); page = skb_frag_page(skb_shinfo(skb)->frags); if (0 == mfn) { balloon_release_driver_page(page); skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, pfn_pte_ma(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } DPRINTK("%s: %d xfer, %d noxfer, %d unused\n", __FUNCTION__, xfer, noxfer, unused); if (xfer) { /* Some pages are no longer absent... */ balloon_update_driver_allowance(-xfer); if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
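 *
 * The multicall issued below applies the new kernel mappings queued
 * above together with the machine-to-physical (M2P) updates, so each
 * machine frame recovered from the rx ring is associated with its
 * local pfn again.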
*/ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, 0, DOMID_SELF); rc = HYPERVISOR_multicall_check( np->rx_mcl, mcl + 1 - np->rx_mcl, NULL); BUG_ON(rc); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void netif_release_rx_bufs_copy(struct netfront_info *np) { struct sk_buff *skb; int i, ref; int busy = 0, inuse = 0; spin_lock_bh(&np->rx_lock); for (i = 0; i < NET_RX_RING_SIZE; i++) { ref = np->grant_rx_ref[i]; if (ref == GRANT_INVALID_REF) continue; inuse++; skb = np->rx_skbs[i]; if (!gnttab_end_foreign_access_ref(ref)) { busy++; continue; } gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(np->rx_skbs, i); dev_kfree_skb(skb); } if (busy) DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n", __FUNCTION__, busy, inuse, NET_RX_RING_SIZE); spin_unlock_bh(&np->rx_lock); } static int network_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static int xennet_set_mac_address(struct net_device *dev, void *p) { struct netfront_info *np = netdev_priv(dev); struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(np->mac, addr->sa_data, ETH_ALEN); return 0; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; netfront_accelerator_call_get_stats(np, dev); for_each_possible_cpu(cpu) { struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { #ifdef OPENSUSE_1302 start = u64_stats_fetch_begin_irq(&stats->syncp); #else start = u64_stats_fetch_begin_bh(&stats->syncp); #endif rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; #ifdef OPENSUSE_1302 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); #else } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); #endif tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_csum_fixups", offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long) }, }; static int xennet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); } return -EOPNOTSUPP; } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { unsigned long *np = netdev_priv(dev); unsigned int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = np[xennet_stats[i].offset]; } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static void netfront_get_drvinfo(struct net_device *dev, struct 
ethtool_drvinfo *info) { strcpy(info->driver, "netfront"); strlcpy(info->bus_info, dev_name(dev->dev.parent), ARRAY_SIZE(info->bus_info)); } static int network_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; netif_rx_request_t *req; unsigned int feature_rx_copy, feature_rx_flip; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-flip", "%u", &feature_rx_flip); if (err != 1) feature_rx_flip = 1; /* * Copy packets on receive path if: * (a) This was requested by user, and the backend supports it; or * (b) Flipping was requested, but this is unsupported by the backend. */ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || (MODPARM_rx_flip && !feature_rx_flip)); err = talk_to_backend(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); DPRINTK("device %s has %sing receive path.\n", dev->name, np->copying_receiver ? "copy" : "flipp"); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* * Recovery procedure: * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ /* Step 1: Discard all pending TX packet fragments. */ netif_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { unsigned long pfn; if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); pfn = page_to_pfn(skb_frag_page(skb_shinfo(skb)->frags)); if (!np->copying_receiver) { gnttab_grant_foreign_transfer_ref( ref, np->xbdev->otherend_id, pfn); } else { gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); } req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
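 *
 * The "kick" is the notify_remote_via_irq() call below; without it
 * the backend might not look at the requeued rx requests until some
 * unrelated event wakes it.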
*/ netfront_carrier_on(np); notify_remote_via_irq(np->irq); network_tx_buf_gc(dev); network_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } static void netif_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_release_tx_bufs(np); if (np->copying_receiver) netif_release_rx_bufs_copy(np); else netif_release_rx_bufs_flip(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct ethtool_ops network_ethtool_ops = { .get_drvinfo = netfront_get_drvinfo, .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; network_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct netfront_info *info = netdev_priv(to_net_dev(dev)); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int error = 0; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { error = device_create_file(&netdev->dev, &xennet_attrs[i]); if (error) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return error; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ 
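/*
 * Illustrative use of the sysfs attributes above (path assumed for a
 * typical setup; the files are created on the net device):
 *
 *	cat /sys/class/net/eth0/rxbuf_cur
 *	echo 512 > /sys/class/net/eth0/rxbuf_max
 *
 * rxbuf_min and rxbuf_max are clamped to RX_MIN_TARGET..RX_MAX_TARGET
 * and adjust the rx buffer fill target used by
 * network_alloc_rx_buffers().
 */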
/* * Nothing to do here. Virtual interface is point-to-point and the * physical interface is probably promiscuous anyway. */ static void network_set_multicast_list(struct net_device *dev) { } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } #ifdef OPENSUSE_1302 if (features & NETIF_F_IPV6_CSUM) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-ipv6-csum-offload", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_IPV6_CSUM; } if (features & NETIF_F_TSO6) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv6", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO6; } #endif return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { netif_int(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_uninit = netif_uninit, .ndo_open = network_open, .ndo_stop = network_close, .ndo_start_xmit = network_start_xmit, .ndo_set_rx_mode = network_set_multicast_list, .ndo_set_mac_address = xennet_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, }; static struct net_device *create_netdev(struct xenbus_device *dev) { int i, err = 0; struct net_device *netdev = NULL; struct netfront_info *np = NULL; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); init_accelerator_vif(np, dev); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; err = -ENOMEM; #ifdef OPENSUSE_1302 np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); #else np->stats = alloc_percpu(struct netfront_stats); #endif if (np->stats == NULL) goto exit; #ifndef OPENSUSE_1302 for_each_possible_cpu(i) u64_stats_init(&per_cpu_ptr(np->stats, i)->syncp); #endif /* Initialise {tx,rx}_skbs as a free chain containing every entry. 
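 * (Illustrative note, not in the original source: each tx_skbs[i] starts
 * out holding the small integer i+1, i.e. the index of the next free slot;
 * once a slot carries a packet it holds the skb pointer instead. The two
 * cases can be told apart because freelist indices are always below
 * PAGE_OFFSET while skb pointers are not, as the recovery comment in
 * network_connect() explains.)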
*/ for (i = 0; i <= NET_TX_RING_SIZE; i++) { np->tx_skbs[i] = (void *)((unsigned long) i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { pr_alert("#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit_free_stats; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { pr_alert("#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, netif_poll, 64); #ifdef OPENSUSE_1302 netdev->features = NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; #else netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; #endif /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; #ifdef OPENSUSE_1302 netdev->ethtool_ops = &network_ethtool_ops; #else SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); #endif SET_NETDEV_DEV(netdev, &dev->dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); #endif np->netdev = netdev; netfront_carrier_off(np); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit_free_stats: free_percpu(np->stats); exit: free_netdev(netdev); return ERR_PTR(err); } static void netif_release_rings(struct netfront_info *info) { end_access(info->tx_ring_ref, info->tx.sring); end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } static void netif_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netfront_carrier_off(info); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->irq) unbind_from_irqhandler(info->irq, info->netdev); info->irq = 0; netif_release_rings(info); } static void end_access(int ref, void *page) { if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, (unsigned long)page); } /* ** Driver registration ** */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; MODULE_ALIAS("xen:vif"); static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = netfront_remove, .suspend = netfront_suspend, .suspend_cancel = netfront_suspend_cancel, .resume = netfront_resume, .otherend_changed = backend_changed, ); static int __init netif_init(void) { if (!is_running_on_xen()) return -ENODEV; #ifdef CONFIG_XEN if (MODPARM_rx_flip && MODPARM_rx_copy) { pr_warning("Cannot specify both rx_copy and rx_flip.\n"); return -EINVAL; } if (!MODPARM_rx_flip && !MODPARM_rx_copy) MODPARM_rx_copy = true; /* Default is to copy. 
*/ #endif netif_init_accel(); pr_info("Initialising virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); netif_exit_accel(); } module_exit(netif_exit); MODULE_LICENSE("Dual BSD/GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-classic_xen_driver-3.12.xto3.16.x/xen-vnif/netfront.h000066400000000000000000000214141314037446600277450ustar00rootroot00000000000000/****************************************************************************** * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * Copyright (C) 2007 Solarflare Communications, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef NETFRONT_H #define NETFRONT_H #include #include #include #include #include #include #define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) #include #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif struct netfront_stats { u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; struct u64_stats_sync syncp; }; /* * Function pointer table for hooks into a network acceleration * plugin. These are called at appropriate points from the netfront * driver */ struct netfront_accel_hooks { /* * new_device: Accelerator hook to ask the plugin to support a * new network interface */ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev); /* * remove: Opposite of new_device */ int (*remove)(struct xenbus_device *dev); /* * The net_device is being polled, check the accelerated * hardware for any pending packets */ int (*netdev_poll)(struct net_device *dev, int *pbudget); /* * start_xmit: Used to give the accelerated plugin the option * of sending a packet. 
Returns non-zero if has done so, or * zero to decline and force the packet onto normal send * path */ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev); /* * start/stop_napi_interrupts Used by netfront to indicate * when napi interrupts should be enabled or disabled */ int (*start_napi_irq)(struct net_device *dev); void (*stop_napi_irq)(struct net_device *dev); /* * Called before re-enabling the TX queue to check the fast * path has slots too */ int (*check_ready)(struct net_device *dev); /* * Get the fastpath network statistics */ int (*get_stats)(struct net_device *dev, struct net_device_stats *dev_stats, struct netfront_stats *link_stats); }; /* Version of API/protocol for communication between netfront and acceleration plugin supported */ #define NETFRONT_ACCEL_VERSION 0x00010003 /* * Per-netfront device state for the accelerator. This is used to * allow efficient per-netfront device access to the accelerator * hooks */ struct netfront_accel_vif_state { struct list_head link; struct xenbus_device *dev; struct netfront_info *np; struct netfront_accel_hooks *hooks; /* Watch on the accelerator configuration value */ struct xenbus_watch accel_watch; /* Work item to process change in accelerator */ struct work_struct accel_work; /* The string from xenbus last time accel_watch fired */ char *accel_frontend; }; /* * Per-accelerator state stored in netfront. These form a list that * is used to track which devices are accelerated by which plugins, * and what plugins are available/have been requested */ struct netfront_accelerator { /* Used to make a list */ struct list_head link; /* ID of the accelerator */ int id; /* * String describing the accelerator. Currently this is the * name of the accelerator module. This is provided by the * backend accelerator through xenstore */ char *frontend; /* The hooks into the accelerator plugin module */ struct netfront_accel_hooks *hooks; /* * List of per-netfront device state (struct * netfront_accel_vif_state) for each netfront device that is * using this accelerator */ struct list_head vif_states; spinlock_t vif_states_lock; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct netif_tx_front_ring tx; struct netif_rx_front_ring rx; spinlock_t tx_lock; spinlock_t rx_lock; struct napi_struct napi; unsigned int irq; unsigned int copying_receiver; unsigned int carrier; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; /* * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs * is an index into a chain of free entries. 
*/ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1]; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; struct xenbus_device *xbdev; int tx_ring_ref; int rx_ring_ref; u8 mac[ETH_ALEN]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ struct netfront_stats __percpu *stats; unsigned long rx_gso_csum_fixups; /* Private pointer to state internal to accelerator module */ void *accel_priv; /* The accelerator used by this netfront device */ struct netfront_accelerator *accelerator; /* The accelerator state for this netfront device */ struct netfront_accel_vif_state accel_vif_state; }; /* Exported Functions */ /* * Called by an accelerator plugin module when it has loaded. * * frontend: the string describing the accelerator, currently the module name * hooks: the hooks for netfront to use to call into the accelerator * version: the version of API between frontend and plugin requested * * return: 0 on success, <0 on error, >0 (with version supported) on * version mismatch */ extern int netfront_accelerator_loaded(int version, const char *frontend, struct netfront_accel_hooks *hooks); /* * Called by an accelerator plugin module when it is about to unload. * * frontend: the string describing the accelerator. Must match the * one passed to netfront_accelerator_loaded() */ extern void netfront_accelerator_stop(const char *frontend); /* * Called by an accelerator before waking the net device's TX queue to * ensure the slow path has available slots. Returns true if OK to * wake, false if still busy */ extern int netfront_check_queue_ready(struct net_device *net_dev); /* Internal-to-netfront Functions */ /* * Call into accelerator and check to see if it has tx space before we * wake the net device's TX queue. 
Returns true if OK to wake, false * if still busy */ extern int netfront_check_accelerator_queue_ready(struct net_device *dev, struct netfront_info *np); extern int netfront_accelerator_call_remove(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend(struct netfront_info *np, struct xenbus_device *dev); extern int netfront_accelerator_suspend_cancel(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_resume(struct netfront_info *np, struct xenbus_device *dev); extern void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np, struct net_device *dev); extern int netfront_accelerator_call_get_stats(struct netfront_info *np, struct net_device *dev); extern void netfront_accelerator_add_watch(struct netfront_info *np); extern void netif_init_accel(void); extern void netif_exit_accel(void); extern void init_accelerator_vif(struct netfront_info *np, struct xenbus_device *dev); #endif /* NETFRONT_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/000077500000000000000000000000001314037446600235405ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/Makefile000066400000000000000000000044441314037446600252060ustar00rootroot00000000000000############################################################################### ###### File name : Makefile ###### ###### Author : uvptools@huawei.com ###### ###### Description : For output xenbus user-space library of new ###### ###### linux OS release ###### ###### History : ###### ###### 2012-05-23: Create the file. ###### ###### 2012-12-28: Add the KERNDIR parameter for build. ###### ############################################################################### M = $(shell pwd) include $(M)/config.mk COMPILEFLAGS=$(CROSSCOMPILE) vnif_path=other ifeq ($(NO_PROCFS), ) obj-m += xen-procfs/ endif ifeq ("2.6.32-131.0.15.el6.x86_64", "$(BUILDKERNEL)") obj-m += xen-scsi/ endif ifeq ("2.6.32-220.el6.x86_64", "$(BUILDKERNEL)") obj-m += xen-scsi/ endif ifeq ("2.6.32-279.el6.x86_64", "$(BUILDKERNEL)") obj-m += xen-scsi/ endif ifeq ("2.6.32-358.el6.x86_64", "$(BUILDKERNEL)") obj-m += xen-scsi/ endif ifeq ("Ubuntu", "$(OSDISTRIBUTION)") #the 3th kernel version ifeq ("38", "$(KERN_VER)") obj-m += xen-balloon/ endif endif ifeq ("Debian", "$(OSDISTRIBUTION)") ifeq ("2.6.32-5-amd64", "$(BUILDKERNEL)") obj-m += xen-balloon/ endif endif ifeq ("Oracle", "$(OSDISTRIBUTION)") ifeq ("","$(ORACLE65)") ifneq ("39", "$(KERN_VER)") obj-m += xen-vbd/ endif ifneq ("", "$(ORACLE56)") obj-m += xen-balloon/ endif obj-m += xen-vnif/ endif endif ifeq ("redhat", "$(OSDISTRIBUTION)") ifeq ($(OS_DIST_RHEL_7), ) obj-m += xen-balloon/ obj-m += xen-vbd/ obj-m += xen-vnif/ obj-m += vni_front/ endif endif ifeq ("39","$(KERN_VER)") vnif_path=oracle6 endif ifeq ("3.16.0-4-amd64", "$(BUILDKERNEL)") obj-m += xen-vnif/ vnif_path=debian8 endif ifeq ("4.0.4-301.fc22.x86_64", "$(BUILDKERNEL)") obj-m += xen-vnif/ vnif_path=fedora22 endif all:lnfile make -C $(KERNDIR) M=$(M) modules $(COMPILEFLAGS) modules_install:lnfile make -C $(KERNDIR) M=$(M) modules_install ${COMPILEFLAGS} lnfile: @set -e; \ cd xen-vnif; \ for file in `ls $(vnif_path)`; \ do \ ln -sf $(vnif_path)/$$file $$file; \ done; \ cd - clean: make -C $(KERNDIR) M=$(M) clean $(COMPILEFLAGS) rm -rf ./*/*.order UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/config.mk000066400000000000000000000031531314037446600253400ustar00rootroot00000000000000_XEN_CPPFLAGS += -I$(M)/include OS_ARCH := 
$(shell uname -m) KERN_VER := $(shell echo $(BUILDKERNEL) | awk -F'.' '{print $$3}' | awk -F'-' '{print $$1}') DEBIAN6 := $(shell echo $(KERNDIR) | grep 2.6.32-5-amd64) ORACLE65 := $(shell echo $(KERNDIR) | grep 3.8.13-16.2.1) ORACLE56 := $(shell echo $(KERNDIR) | grep "2.6.32-[0-9]\{3\}\.") OS_DIST_UBUNTU_KERNEL_3110 := $(shell echo $(KERNDIR) | grep 3.11.0) OS_DIST_UBUNTU_KERNEL_3130 := $(shell echo $(KERNDIR) | grep "3.13.0\|3.16.0\|3.19.0\|4.2.3\|4.0.4") OS_DIST_RHEL_7 := $(shell echo $(KERNDIR) | grep 3.10.0-123.el7) OS_DIST_GENTOO_KERNEL_390 := $(shell echo $(OSDISTRIBUTION) | grep Gentoo) #AUTOCONF := $(shell find $(CROSS_COMPILE_KERNELSOURCE) -name autoconf.h) #debian8.2/ubuntu14.04.3/fedora22/23 not use xen_procfs NO_PROCFS := $(shell echo $(KERNDIR) | grep "3.16.0-4\|3.19.0-25\|4.2.3-300\|4.0.4-301") ifneq ($(NO_PROCFS), ) _XEN_CPPFLAGS += -DNO_PROCFS endif OS_DIST_KERNEL_3190_UP := $(shell echo $(KERNDIR) | grep "3.19.0-25\|4.2.3-300\|4.0.4-301") ifneq ($(OS_DIST_KERNEL_3190_UP), ) _XEN_CPPFLAGS += -DKERNEL_3190_UP endif ifeq ("Oracle", "$(OSDISTRIBUTION)") _XEN_CPPFLAGS += -DORACLE endif ifneq ($(OS_DIST_UBUNTU_KERNEL_3130), ) _XEN_CPPFLAGS += -DUBUNTU_KERNEL_3110 endif ifneq ($(OS_DIST_RHEL_7), ) _XEN_CPPFLAGS += -DUBUNTU_KERNEL_3110 endif ifneq ($(OS_DIST_UBUNTU_KERNEL_3110), ) _XEN_CPPFLAGS += -DUBUNTU_KERNEL_3110 endif ifeq ("Gentoo","$(OSDISTRIBUTION)") _XEN_CPPFLAGS += -DGENTOO_KERNEL_390 endif #_XEN_CPPFLAGS += -include $(AUTOCONF) EXTRA_CFLAGS += $(_XEN_CPPFLAGS) EXTRA_AFLAGS += $(_XEN_CPPFLAGS) CPPFLAGS := -I$(M)/include $(CPPFLAGS) UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/000077500000000000000000000000001314037446600251635ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/asm/000077500000000000000000000000001314037446600257435ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/asm/maddr.h000066400000000000000000000114111314037446600272010ustar00rootroot00000000000000#ifndef _MADDR_H #define _MADDR_H #include #include /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ #define INVALID_P2M_ENTRY(~0UL) #define FOREIGN_FRAME_BIT(1UL<<63) #define FOREIGN_FRAME(m)((m) | FOREIGN_FRAME_BIT) /* Definitions for machine and pseudophysical addresses. */ typedef unsigned long paddr_t; typedef unsigned long maddr_t; #ifdef CONFIG_XEN extern unsigned long *phys_to_machine_mapping; #undef machine_to_phys_mapping extern unsigned long *machine_to_phys_mapping; extern unsigned int machine_to_phys_order; static inline unsigned long pfn_to_mfn(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return pfn; BUG_ON(end_pfn && pfn >= end_pfn); return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT; } static inline int phys_to_machine_mapping_valid(unsigned long pfn) { if (xen_feature(XENFEAT_auto_translated_physmap)) return 1; BUG_ON(end_pfn && pfn >= end_pfn); return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY); } static inline unsigned long mfn_to_pfn(unsigned long mfn) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; if (unlikely((mfn >> machine_to_phys_order) != 0)) return end_pfn; /* The array access can fail (e.g., device space beyond end of RAM). 
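 * (Illustrative note, not in the original source: the inline assembly that
 * follows registers the load at label 1 in the __ex_table section; if
 * reading machine_to_phys_mapping[mfn] faults, the fixup code at label 3
 * stores end_pfn in the result instead, so an invalid MFN is reported as
 * an out-of-range PFN rather than oopsing.)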
*/ asm ( "1:movq %1,%0\n" "2:\n" ".section .fixup,\"ax\"\n" "3:movq %2,%0\n" "jmp 2b\n" ".previous\n" ".section __ex_table,\"a\"\n" ".align 8\n" ".quad 1b,3b\n" ".previous" : "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) ); return pfn; } /* * We detect special mappings in one of two ways: * 1. If the MFN is an I/O page then Xen will set the m2p entry * to be outside our maximum possible pseudophys range. * 2. If the MFN belongs to a different domain then we will certainly * not have MFN in our p2m table. Conversely, if the page is ours, * then we'll have p2m(m2p(MFN))==MFN. * If we detect a special mapping then it doesn't have a 'struct page'. * We force !pfn_valid() by returning an out-of-range pointer. * * NB. These checks require that, for any MFN that is not in our reservation, * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN. * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety. * * NB2. When deliberately mapping foreign pages into the p2m table, you *must* * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we * require. In all the cases we care about, the FOREIGN_FRAME bit is * masked (e.g., pfn_to_mfn()) so behaviour there is correct. */ static inline unsigned long mfn_to_local_pfn(unsigned long mfn) { unsigned long pfn = mfn_to_pfn(mfn); if ((pfn < end_pfn) && !xen_feature(XENFEAT_auto_translated_physmap) && (phys_to_machine_mapping[pfn] != mfn)) return end_pfn; /* force !pfn_valid() */ return pfn; } static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) { BUG_ON(end_pfn && pfn >= end_pfn); if (xen_feature(XENFEAT_auto_translated_physmap)) { BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); return; } phys_to_machine_mapping[pfn] = mfn; } static inline maddr_t phys_to_machine(paddr_t phys) { maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK); return machine; } static inline paddr_t machine_to_phys(maddr_t machine) { paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK); return phys; } static inline paddr_t pte_phys_to_machine(paddr_t phys) { maddr_t machine; machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK); return machine; } static inline paddr_t pte_machine_to_phys(maddr_t machine) { paddr_t phys; phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT); phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK); return phys; } #define __pte_ma(x) ((pte_t) { (x) } ) #define pfn_pte_ma(pfn, prot)__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask) #else /* !CONFIG_XEN */ #define pfn_to_mfn(pfn) (pfn) #define mfn_to_pfn(mfn) (mfn) #define mfn_to_local_pfn(mfn) (mfn) #define set_phys_to_machine(pfn, mfn) ((void)0) #define phys_to_machine_mapping_valid(pfn) (1) #define phys_to_machine(phys) ((maddr_t)(phys)) #define machine_to_phys(mach) ((paddr_t)(mach)) #define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot) #define __pte_ma(x) __pte(x) #endif /* !CONFIG_XEN */ /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v)(phys_to_machine(__pa(v))) #define virt_to_mfn(v)(pfn_to_mfn(__pa(v) >> PAGE_SHIFT)) #define mfn_to_virt(m)(__va(mfn_to_pfn(m) << PAGE_SHIFT)) #endif /* _MADDR_H */ 
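The conversion helpers in maddr.h all follow the same arithmetic: shift the frame number left by PAGE_SHIFT and merge the within-page offset back in. A minimal, self-contained sketch of that arithmetic (userspace C, 4 KiB pages and a made-up p2m offset purely for illustration; none of this is taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Toy p2m for illustration only: pretend mfn = pfn + 0x1000. */
static uint64_t toy_pfn_to_mfn(uint64_t pfn) { return pfn + 0x1000; }

/* Mirrors phys_to_machine(): translate the frame, keep the offset. */
static uint64_t toy_phys_to_machine(uint64_t phys)
{
	uint64_t machine = toy_pfn_to_mfn(phys >> PAGE_SHIFT);
	return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
}

int main(void)
{
	uint64_t phys = 0x12345678;
	printf("phys 0x%llx -> machine 0x%llx (offset 0x%llx unchanged)\n",
	       (unsigned long long)phys,
	       (unsigned long long)toy_phys_to_machine(phys),
	       (unsigned long long)(phys & ~PAGE_MASK));
	return 0;
}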
UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/internal.h000066400000000000000000000177451314037446600271660ustar00rootroot00000000000000/* Internal procfs definitions * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include #include #include #include #include struct ctl_table_header; struct mempolicy; /* * This is not completely implemented yet. The idea is to * create an in-memory tree (like the actual /proc filesystem * tree) of these proc_dir_entries, so that we can dynamically * add new files to /proc. * * The "next" pointer creates a linked list of one /proc directory, * while parent/subdir create the directory structure (every * /proc file has a parent, but "subdir" is NULL for all * non-directory entries). */ struct proc_dir_entry { unsigned int low_ino; umode_t mode; nlink_t nlink; kuid_t uid; kgid_t gid; loff_t size; const struct inode_operations *proc_iops; const struct file_operations *proc_fops; #ifdef KERNEL_3190_UP struct proc_dir_entry *parent; struct rb_root subdir; struct rb_node subdir_node; #else struct proc_dir_entry *next, *parent, *subdir; #endif void *data; atomic_t count; /* use count */ atomic_t in_use; /* number of callers into module in progress; */ /* negative -> it's going away RSN */ struct completion *pde_unload_completion; struct list_head pde_openers; /* who did ->open, but not ->release */ spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ u8 namelen; char name[]; }; union proc_op { int (*proc_get_link)(struct dentry *, struct path *); int (*proc_read)(struct task_struct *task, char *page); int (*proc_show)(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task); }; struct proc_inode { struct pid *pid; int fd; union proc_op op; struct proc_dir_entry *pde; struct ctl_table_header *sysctl; struct ctl_table *sysctl_entry; #ifdef KERNEL_3190_UP const struct proc_ns_operations *ns_ops; #else struct proc_ns ns; #endif struct inode vfs_inode; }; /* * General functions */ static inline struct proc_inode *PROC_I(const struct inode *inode) { return container_of(inode, struct proc_inode, vfs_inode); } static inline struct proc_dir_entry *PDE(const struct inode *inode) { return PROC_I(inode)->pde; } static inline void *__PDE_DATA(const struct inode *inode) { return PDE(inode)->data; } static inline struct pid *proc_pid(struct inode *inode) { return PROC_I(inode)->pid; } static inline struct task_struct *get_proc_task(struct inode *inode) { return get_pid_task(proc_pid(inode), PIDTYPE_PID); } static inline int task_dumpable(struct task_struct *task) { int dumpable = 0; struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) dumpable = get_dumpable(mm); task_unlock(task); if (dumpable == SUID_DUMP_USER) return 1; return 0; } static inline unsigned name_to_int(struct dentry *dentry) { const char *name = dentry->d_name.name; int len = dentry->d_name.len; unsigned n = 0; if (len > 1 && *name == '0') goto out; while (len-- > 0) { unsigned c = *name++ - '0'; if (c > 9) goto out; if (n >= (~0U-9)/10) goto out; n *= 10; n += c; } return n; out: return ~0U; } /* * Offset of the first process in the /proc root directory.. 
*/ #define FIRST_PROCESS_ENTRY 256 /* Worst case buffer size needed for holding an integer. */ #define PROC_NUMBUF 13 /* * array.c */ extern const struct file_operations proc_tid_children_operations; extern int proc_tid_stat(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_pid_status(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); extern int proc_pid_statm(struct seq_file *, struct pid_namespace *, struct pid *, struct task_struct *); /* * base.c */ extern const struct dentry_operations pid_dentry_operations; extern int pid_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int proc_setattr(struct dentry *, struct iattr *); extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *); extern int pid_revalidate(struct dentry *, unsigned int); extern int pid_delete_dentry(const struct dentry *); extern int proc_pid_readdir(struct file *, struct dir_context *); extern struct dentry *proc_pid_lookup(struct inode *, struct dentry *, unsigned int); extern loff_t mem_lseek(struct file *, loff_t, int); /* Lookups */ typedef int instantiate_t(struct inode *, struct dentry *, struct task_struct *, const void *); extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, int, instantiate_t, struct task_struct *, const void *); /* * generic.c */ extern spinlock_t proc_subdir_lock; extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *, struct dentry *); extern int proc_readdir(struct file *, struct dir_context *); extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *); static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) { atomic_inc(&pde->count); return pde; } extern void pde_put(struct proc_dir_entry *); /* * inode.c */ struct pde_opener { struct file *file; struct list_head lh; int closing; struct completion *c; }; extern const struct inode_operations proc_pid_link_inode_operations; extern void proc_init_inodecache(void); extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *); extern int proc_fill_super(struct super_block *); extern void proc_entry_rundown(struct proc_dir_entry *); /* * proc_devtree.c */ #ifdef CONFIG_PROC_DEVICETREE extern void proc_device_tree_init(void); #endif /* * proc_namespaces.c */ extern const struct inode_operations proc_ns_dir_inode_operations; extern const struct file_operations proc_ns_dir_operations; /* * proc_net.c */ extern const struct file_operations proc_net_operations; extern const struct inode_operations proc_net_inode_operations; #ifdef CONFIG_NET extern int proc_net_init(void); #else static inline int proc_net_init(void) { return 0; } #endif /* * proc_self.c */ extern int proc_setup_self(struct super_block *); /* * proc_sysctl.c */ #ifdef CONFIG_PROC_SYSCTL extern int proc_sys_init(void); extern void sysctl_head_put(struct ctl_table_header *); #else static inline void proc_sys_init(void) { } static inline void sysctl_head_put(struct ctl_table_header *head) { } #endif /* * proc_tty.c */ #ifdef CONFIG_TTY extern void proc_tty_init(void); #else static inline void proc_tty_init(void) {} #endif /* * root.c */ extern struct proc_dir_entry proc_root; extern void proc_self_init(void); extern int proc_remount(struct super_block *, int *, char 
*); /* * task_[no]mmu.c */ struct proc_maps_private { struct pid *pid; struct task_struct *task; #ifdef CONFIG_MMU struct vm_area_struct *tail_vma; #endif #ifdef CONFIG_NUMA struct mempolicy *task_mempolicy; #endif }; extern const struct file_operations proc_pid_maps_operations; extern const struct file_operations proc_tid_maps_operations; extern const struct file_operations proc_pid_numa_maps_operations; extern const struct file_operations proc_tid_numa_maps_operations; extern const struct file_operations proc_pid_smaps_operations; extern const struct file_operations proc_tid_smaps_operations; extern const struct file_operations proc_clear_refs_operations; extern const struct file_operations proc_pagemap_operations; extern unsigned long task_vsize(struct mm_struct *); extern unsigned long task_statm(struct mm_struct *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void task_mem(struct seq_file *, struct mm_struct *); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/000077500000000000000000000000001314037446600263225ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/compat_virtionet.h000066400000000000000000000052411314037446600320630ustar00rootroot00000000000000#ifndef __COMPAT_VIRTIONET_H__ #define __COMPAT_VIRTIONET_H__ #include #ifndef netif_set_real_num_tx_queues #define netif_set_real_num_tx_queues(dev, max_queue_pairs) #endif #ifndef netif_set_real_num_rx_queues #define netif_set_real_num_rx_queues(dev, max_queue_pairs) #endif #ifndef this_cpu_ptr #define this_cpu_ptr(field) \ per_cpu_ptr(field, smp_processor_id()) #endif #ifndef net_ratelimited_function #define net_ratelimited_function(function, ...) \ do { \ if (net_ratelimit()) \ function(__VA_ARGS__); \ } while (0) #endif /* net_ratelimited_function */ #ifndef pr_warn #define pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) #endif /* pr_warn */ #ifndef net_warn_ratelimited #define net_warn_ratelimited(fmt, ...) \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #endif /* net_warn_ratelimited */ #ifndef net_dbg_ratelimited #define net_dbg_ratelimited(fmt, ...) \ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #endif /* net_dbg_ratelimited */ #ifndef eth_hw_addr_random #define eth_hw_addr_random(dev) \ random_ether_addr(dev->dev_addr) #endif /* eth_hw_addr_random */ #ifndef netdev_for_each_uc_addr #define netdev_for_each_uc_addr(ha, dev) \ list_for_each_entry(ha, &dev->uc.list, list) #endif /* netdev_for_each_uc_addr */ #ifndef netdev_mc_count #define netdev_mc_count(dev) \ dev->mc_count #endif /* netdev_mc_count */ #ifndef netdev_uc_count #define netdev_uc_count(dev) \ dev->uc.count #endif /* netdev_uc_count */ #ifndef __percpu #define __percpu #endif /* __percpu */ #ifndef skb_checksum_start_offset #define skb_checksum_start_offset(skb) \ skb->csum_start - skb_headroom(skb) #endif /* skb_checksum_start_offset */ /** * module_driver() - Helper macro for drivers that don't do anything * special in module init/exit. This eliminates a lot of boilerplate. * Each module may only use this macro once, and calling it replaces * module_init() and module_exit(). * * @__driver: driver name * @__register: register function for this driver type * @__unregister: unregister function for this driver type * @...: Additional arguments to be passed to __register and __unregister. * * Use this macro to construct bus specific macros for registering * drivers, and do not use it on its own. 
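 * (Illustrative note, not in the original source: virtio builds its own
 * bus-specific helper on top of this macro; include/linux/virtio.h in this
 * tree defines module_virtio_driver(drv) as
 * module_driver(drv, register_virtio_driver, unregister_virtio_driver).)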
*/ #ifndef module_driver #define module_driver(__driver, __register, __unregister, ...) \ static int __init __driver##_init(void) \ { \ return __register(&(__driver) , ##__VA_ARGS__); \ } \ module_init(__driver##_init); \ static void __exit __driver##_exit(void) \ { \ __unregister(&(__driver) , ##__VA_ARGS__); \ } \ module_exit(__driver##_exit); #endif /* module_driver */ #endif /* __COMPAT_VIRTIONET_H__ */UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/u64_stats_sync.h000066400000000000000000000102541314037446600313650ustar00rootroot00000000000000#ifndef _LINUX_U64_STATS_SYNC_H #define _LINUX_U64_STATS_SYNC_H /* * To properly implement 64bits network statistics on 32bit and 64bit hosts, * we provide a synchronization point, that is a noop on 64bit or UP kernels. * * Key points : * 1) Use a seqcount on SMP 32bits, with low overhead. * 2) Whole thing is a noop on 64bit arches or UP kernels. * 3) Write side must ensure mutual exclusion or one seqcount update could * be lost, thus blocking readers forever. * If this synchronization point is not a mutex, but a spinlock or * spinlock_bh() or disable_bh() : * 3.1) Write side should not sleep. * 3.2) Write side should not allow preemption. * 3.3) If applicable, interrupts should be disabled. * * 4) If reader fetches several counters, there is no guarantee the whole values * are consistent (remember point 1) : this is a noop on 64bit arches anyway) * * 5) readers are allowed to sleep or be preempted/interrupted : They perform * pure reads. But if they have to fetch many values, it's better to not allow * preemptions/interruptions to avoid many retries. * * 6) If counter might be written by an interrupt, readers should block interrupts. * (On UP, there is no seqcount_t protection, a reader allowing interrupts could * read partial values) * * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and * u64_stats_fetch_retry_bh() helpers * * Usage : * * Stats producer (writer) should use following template granted it already got * an exclusive access to counters (a lock is already taken, or per cpu * data is used [in a non preemptable context]) * * spin_lock_bh(...) or other synchronization to get exclusive access * ... * u64_stats_update_begin(&stats->syncp); * stats->bytes64 += len; // non atomic operation * stats->packets64++; // non atomic operation * u64_stats_update_end(&stats->syncp); * * While a consumer (reader) should use following template to get consistent * snapshot for each variable (but no guarantee on several ones) * * u64 tbytes, tpackets; * unsigned int start; * * do { * start = u64_stats_fetch_begin(&stats->syncp); * tbytes = stats->bytes64; // non atomic operation * tpackets = stats->packets64; // non atomic operation * } while (u64_stats_fetch_retry(&stats->syncp, start)); * * * Example of use in drivers/net/loopback.c, using per_cpu containers, * in BH disabled context. 
*/ #include struct u64_stats_sync { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t seq; #endif }; static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_begin(&syncp->seq); #endif } static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) write_seqcount_end(&syncp->seq); #endif } static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 preempt_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 preempt_enable(); #endif return false; #endif } /* * In case softirq handlers can update u64 counters, readers can use following helpers * - SMP 32bit arches use seqcount protection, irq safe. * - UP 32bit must disable BH. * - 64bit have no problem atomically reading u64 values, irq safe. */ static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else #if BITS_PER_LONG==32 local_bh_disable(); #endif return 0; #endif } static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else #if BITS_PER_LONG==32 local_bh_enable(); #endif return false; #endif } #endif /* _LINUX_U64_STATS_SYNC_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio.h000066400000000000000000000120701314037446600300070ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_H #define _LINUX_VIRTIO_H /* Everything a virtio driver needs to work with any particular virtio * implementation. */ #include #include #include #include #include #include //#include /** * virtqueue - a queue to register buffers for sending or receiving. * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @priv: a pointer for the virtqueue implementation to use. * @index: the zero-based ordinal number for this queue. * @num_free: number of elements we expect to be able to fit. * * A note on @num_free: with indirect buffers, each buffer needs one * element in the queue, otherwise a buffer will need one element per * sg element. 
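 * A minimal usage sketch (illustrative, not from the original header): a
 * driver that wants to post a receive buffer and later collect it would do
 * roughly
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	virtqueue_kick(vq);
 *	...
 *	buf = virtqueue_get_buf(vq, &len);   // len = bytes the host wrote
 *
 * using only the declarations below.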
*/ struct virtqueue { struct list_head list; void (*callback)(struct virtqueue *vq); const char *name; struct virtio_device *vdev; unsigned int index; unsigned int num_free; void *priv; }; int virtqueue_add_buf(struct virtqueue *vq, struct scatterlist sg[], unsigned int out_num, unsigned int in_num, void *data, gfp_t gfp); int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp); int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp); void virtqueue_kick(struct virtqueue *vq); bool virtqueue_kick_prepare(struct virtqueue *vq); void virtqueue_notify(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); bool virtqueue_poll(struct virtqueue *vq, unsigned); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. * @vringh_config: configuration ops for host vrings. * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. */ struct virtio_device { int index; struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; /* Note that this is a Linux set_bit-style bitmap. */ unsigned long features[1]; void *priv; }; static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); } int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); /** * virtio_driver - operations for a virtio I/O driver * @driver: underlying device driver (populate name and owner). * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. 
*/ struct virtio_driver { struct device_driver driver; const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); void (*config_changed)(struct virtio_device *dev); #ifdef CONFIG_PM int (*freeze)(struct virtio_device *dev); int (*restore)(struct virtio_device *dev); #endif }; static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv) { return container_of(drv, struct virtio_driver, driver); } int register_virtio_driver(struct virtio_driver *drv); void unregister_virtio_driver(struct virtio_driver *drv); /* module_virtio_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate. Each module may only use this macro once, and * calling it replaces module_init() and module_exit() */ #define module_virtio_driver(__virtio_driver) \ module_driver(__virtio_driver, register_virtio_driver, \ unregister_virtio_driver) #endif /* _LINUX_VIRTIO_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio_config.h000066400000000000000000000127611314037446600313430ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_CONFIG_H #define _LINUX_VIRTIO_CONFIG_H #include #include #include #include #include /** * virtio_config_ops - operations for configuring a virtio device * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field * buf: the buffer to write the field value into. * len: the length of the buffer * @set: write the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field * buf: the buffer to read the field value from. * len: the length of the buffer * @get_status: read the status byte * vdev: the virtio_device * Returns the status byte * @set_status: write the status byte * vdev: the virtio_device * status: the new status byte * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again * Device must not be reset from its vq/config callbacks, or in * parallel with being added/removed. * @find_vqs: find virtqueues and instantiate them. * vdev: the virtio_device * nvqs: the number of virtqueues to find * vqs: on success, includes new virtqueues * callbacks: array of callbacks, for each virtqueue * include a NULL entry for vqs that do not need a callback * names: array of virtqueue names (mainly for debugging) * include a NULL entry for vqs unused by driver * Returns 0 on success or error status * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. * @bus_name: return the bus name associated with the device * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which * the caller can then copy. * @set_vq_affinity: set the affinity for a virtqueue. 
*/ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len); u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]); void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); }; /* If driver didn't advertise the feature, it will never appear. */ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit); /** * virtio_has_feature - helper to determine if this device has this feature. * @vdev: the device * @fbit: the feature bit */ static inline bool virtio_has_feature(const struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ if (__builtin_constant_p(fbit)) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) BUILD_BUG_ON(fbit >= 32); #else MAYBE_BUILD_BUG_ON(fbit >= 32); #endif else BUG_ON(fbit >= 32); if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); return test_bit(fbit, vdev->features); } /** * virtio_config_val - look for a feature and get a virtio config entry. * @vdev: the virtio device * @fbit: the feature bit * @offset: the type to search for. * @v: a pointer to the value to fill in. * * The return value is -ENOENT if the feature doesn't exist. Otherwise * the config value is copied into whatever is pointed to by v. */ #define virtio_config_val(vdev, fbit, offset, v) \ virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v)) #define virtio_config_val_len(vdev, fbit, offset, v, len) \ virtio_config_buf((vdev), (fbit), (offset), (v), (len)) static inline int virtio_config_buf(struct virtio_device *vdev, unsigned int fbit, unsigned int offset, void *buf, unsigned len) { if (!virtio_has_feature(vdev, fbit)) return -ENOENT; vdev->config->get(vdev, offset, buf, len); return 0; } static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *c, const char *n) { vq_callback_t *callbacks[] = { c }; const char *names[] = { n }; struct virtqueue *vq; int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); if (err < 0) return ERR_PTR(err); return vq; } static inline const char *virtio_bus_name(struct virtio_device *vdev) { if (!vdev->config->bus_name) return "virtio"; return vdev->config->bus_name(vdev); } /** * virtqueue_set_affinity - setting affinity for a virtqueue * @vq: the virtqueue * @cpu: the cpu no. * * Pay attention the function are best-effort: the affinity hint may not be set * due to config support, irq type and sharing. 
* */ static inline int virtqueue_set_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; if (vdev->config->set_vq_affinity) return vdev->config->set_vq_affinity(vq, cpu); return 0; } #endif /* _LINUX_VIRTIO_CONFIG_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio_ids.h000066400000000000000000000042401314037446600306460ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_IDS_H #define _LINUX_VIRTIO_IDS_H /* * Virtio IDs * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define VIRTIO_ID_NET 1 /* virtio net */ #define VIRTIO_ID_BLOCK 2 /* virtio block */ #define VIRTIO_ID_CONSOLE 3 /* virtio console */ #define VIRTIO_ID_RNG 4 /* virtio rng */ #define VIRTIO_ID_BALLOON 5 /* virtio balloon */ #define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */ #define VIRTIO_ID_SCSI 8 /* virtio scsi */ #define VIRTIO_ID_9P 9 /* 9p virtio console */ #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ #define VIRTIO_ID_CAIF 12 /* Virtio caif */ #endif /* _LINUX_VIRTIO_IDS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio_net.h000066400000000000000000000204651314037446600306640ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ #define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ #define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ #define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ #define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ #define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ #define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ #define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ #define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the * network */ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ #define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ __u8 mac[6]; /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ __u16 status; /* Maximum number of each of transmit and receive queues; * see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ. * Legal values are between 1 and 0x8000 */ __u16 max_virtqueue_pairs; } __attribute__((packed)); /* This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
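 * (Illustrative note, not in the original header: for a packet needing a
 * partial checksum a driver typically sets flags to
 * VIRTIO_NET_HDR_F_NEEDS_CSUM, csum_start to skb_checksum_start_offset(skb)
 * and csum_offset to skb->csum_offset, matching the field descriptions
 * below; skb_checksum_start_offset() is the compat helper defined in
 * compat_virtionet.h earlier in this tree.)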
*/ struct virtio_net_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset #define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid __u8 flags; #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame #define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) #define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO) #define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set __u8 gso_type; __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ __u16 gso_size; /* Bytes to append to hdr_len per frame */ __u16 csum_start; /* Position to start checksumming from */ __u16 csum_offset; /* Offset after that to place checksum */ }; /* This is the version of the header to use when the MRG_RXBUF * feature has been negotiated. */ struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; __u16 num_buffers; /* Number of merged rx buffers */ }; /* * Control virtqueue data structures * * The control virtqueue expects a header in the first sg entry * and an ack/status response in the last entry. Data for the * command goes in between. */ struct virtio_net_ctrl_hdr { __u8 class; __u8 cmd; } __attribute__((packed)); typedef __u8 virtio_net_ctrl_ack; #define VIRTIO_NET_OK 0 #define VIRTIO_NET_ERR 1 /* * Control the RX mode, ie. promisucous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ #define VIRTIO_NET_CTRL_RX 0 #define VIRTIO_NET_CTRL_RX_PROMISC 0 #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 #define VIRTIO_NET_CTRL_RX_ALLUNI 2 #define VIRTIO_NET_CTRL_RX_NOMULTI 3 #define VIRTIO_NET_CTRL_RX_NOUNI 4 #define VIRTIO_NET_CTRL_RX_NOBCAST 5 /* * Control the MAC * * The MAC filter table is managed by the hypervisor, the guest should * assume the size is infinite. Filtering should be considered * non-perfect, ie. based on hypervisor resources, the guest may * received packets from sources not specified in the filter list. * * In addition to the class/cmd header, the TABLE_SET command requires * two out scatterlists. Each contains a 4 byte count of entries followed * by a concatenated byte stream of the ETH_ALEN MAC addresses. The * first sg list contains unicast addresses, the second is for multicast. * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature * is available. * * The ADDR_SET command requests one out scatterlist, it contains a * 6 bytes MAC address. This functionality is present if the * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available. */ struct virtio_net_ctrl_mac { __u32 entries; __u8 macs[][ETH_ALEN]; } __attribute__((packed)); #define VIRTIO_NET_CTRL_MAC 1 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1 /* * Control VLAN filtering * * The VLAN filter table is controlled via a simple ADD/DEL interface. * VLAN IDs not added may be filterd by the hypervisor. Del is the * opposite of add. Both commands expect an out entry containing a 2 * byte VLAN ID. VLAN filterting is available with the * VIRTIO_NET_F_CTRL_VLAN feature bit. */ #define VIRTIO_NET_CTRL_VLAN 2 #define VIRTIO_NET_CTRL_VLAN_ADD 0 #define VIRTIO_NET_CTRL_VLAN_DEL 1 /* * Control link announce acknowledgement * * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that * driver has recevied the notification; device would clear the * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives * this command. 
*/ #define VIRTIO_NET_CTRL_ANNOUNCE 3 #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0 /* * Control Receive Flow Steering * * The command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET * enables Receive Flow Steering, specifying the number of the transmit and * receive queues that will be used. After the command is consumed and acked by * the device, the device will not steer new packets on receive virtqueues * other than specified nor read from transmit virtqueues other than specified. * Accordingly, driver should not transmit new packets on virtqueues other than * specified. */ struct virtio_net_ctrl_mq { __u16 virtqueue_pairs; }; #define VIRTIO_NET_CTRL_MQ 4 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 #endif /* _LINUX_VIRTIO_NET_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio_pci.h000066400000000000000000000072341314037446600306500ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _LINUX_VIRTIO_PCI_H #define _LINUX_VIRTIO_PCI_H #include /* A 32-bit r/o bitmask of the features supported by the host */ #define VIRTIO_PCI_HOST_FEATURES 0 /* A 32-bit r/w bitmask of features activated by the guest */ #define VIRTIO_PCI_GUEST_FEATURES 4 /* A 32-bit r/w PFN for the currently selected queue */ #define VIRTIO_PCI_QUEUE_PFN 8 /* A 16-bit r/o queue size for the currently selected queue */ #define VIRTIO_PCI_QUEUE_NUM 12 /* A 16-bit r/w queue selector */ #define VIRTIO_PCI_QUEUE_SEL 14 /* A 16-bit r/w queue notifier */ #define VIRTIO_PCI_QUEUE_NOTIFY 16 /* An 8-bit device status register. */ #define VIRTIO_PCI_STATUS 18 /* An 8-bit r/o interrupt status register. Reading the value will return the * current contents of the ISR and will also clear it. 
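 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * Rough order in which a legacy virtio-pci driver touches the registers
 * defined above during probe.  "ioaddr" (the mapped BAR0), "wanted" and
 * "ring_pfn" are assumed to come from the surrounding probe code.
 *
 *     u32 features = ioread32(ioaddr + VIRTIO_PCI_HOST_FEATURES); // host bits
 *     iowrite32(features & wanted, ioaddr + VIRTIO_PCI_GUEST_FEATURES);
 *
 *     iowrite16(0, ioaddr + VIRTIO_PCI_QUEUE_SEL);        // select queue 0
 *     u16 num = ioread16(ioaddr + VIRTIO_PCI_QUEUE_NUM);  // ring size to allocate
 *     iowrite32(ring_pfn, ioaddr + VIRTIO_PCI_QUEUE_PFN); // publish ring address
 *     iowrite16(0, ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);     // kick queue 0
 *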
This is effectively * a read-and-acknowledge. */ #define VIRTIO_PCI_ISR 19 /* The bit of the ISR which indicates a device configuration change. */ #define VIRTIO_PCI_ISR_CONFIG 0x2 /* MSI-X registers: only enabled if MSI-X is enabled. */ /* A 16-bit vector for configuration changes. */ #define VIRTIO_MSI_CONFIG_VECTOR 20 /* A 16-bit vector for selected queue notifications. */ #define VIRTIO_MSI_QUEUE_VECTOR 22 /* Vector value used to disable MSI for queue */ #define VIRTIO_MSI_NO_VECTOR 0xffff /* The remaining space is defined by each driver as the per-driver * configuration space */ #define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 /* How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 /* The alignment to use between consumer and producer parts of vring. * x86 pagesize again. */ #define VIRTIO_PCI_VRING_ALIGN 4096 #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/linux/virtio_ring.h000066400000000000000000000035421314037446600310320ustar00rootroot00000000000000#ifndef _LINUX_VIRTIO_RING_H #define _LINUX_VIRTIO_RING_H #include #include /* * Barriers in virtio are tricky. Non-SMP virtio guests can't assume * they're not on an SMP host system, so they need to assume real * barriers. Non-SMP virtio hosts could skip the barriers, but does * anyone care? * * For virtio_pci on SMP, we don't need to order with respect to MMIO * accesses through relaxed memory I/O windows, so smp_mb() et al are * sufficient. * * For using virtio to talk to real devices (eg. other heterogeneous * CPUs) we do need real barriers. In theory, we could be using both * kinds of virtio, so it's a runtime decision, and the branch is * actually quite cheap. */ #ifdef CONFIG_SMP static inline void virtio_mb(bool weak_barriers) { if (weak_barriers) smp_mb(); else mb(); } static inline void virtio_rmb(bool weak_barriers) { if (weak_barriers) smp_rmb(); else rmb(); } static inline void virtio_wmb(bool weak_barriers) { if (weak_barriers) smp_wmb(); else wmb(); } #else static inline void virtio_mb(bool weak_barriers) { mb(); } static inline void virtio_rmb(bool weak_barriers) { rmb(); } static inline void virtio_wmb(bool weak_barriers) { wmb(); } #endif struct virtio_device; struct virtqueue; struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name); void vring_del_virtqueue(struct virtqueue *vq); /* Filter out transport-specific feature bits. 
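 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The producer-side pattern these barrier wrappers exist for: make the
 * descriptor and avail-ring writes visible before advancing the published
 * index.  "vr", "head" and "weak_barriers" are assumed from the caller.
 *
 *     vr->avail->ring[vr->avail->idx % vr->num] = head;  // expose the chain head
 *     virtio_wmb(weak_barriers);                         // order ring[] before idx
 *     vr->avail->idx++;                                  // publish to the other side
 *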
*/ void vring_transport_features(struct virtio_device *vdev); irqreturn_t vring_interrupt(int irq, void *_vq); #endif /* _LINUX_VIRTIO_RING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/uapi/000077500000000000000000000000001314037446600261215ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/uapi/linux/000077500000000000000000000000001314037446600272605ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/uapi/linux/virtio_config.h000066400000000000000000000053301314037446600322730ustar00rootroot00000000000000#ifndef _UAPI_LINUX_VIRTIO_CONFIG_H #define _UAPI_LINUX_VIRTIO_CONFIG_H /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Virtio devices use a standardized configuration space to define their * features and pass configuration information, but each implementation can * store and access that space differently. */ #include /* Status byte for guest to report progress, and synchronize features. */ /* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */ #define VIRTIO_CONFIG_S_ACKNOWLEDGE 1 /* We have found a driver for the device. */ #define VIRTIO_CONFIG_S_DRIVER 2 /* Driver has used its parts of the config, and is happy */ #define VIRTIO_CONFIG_S_DRIVER_OK 4 /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 /* Some virtio feature bits (currently bits 28 through 31) are reserved for the * transport being used (eg. virtio_ring), the rest are per-device feature * bits. */ #define VIRTIO_TRANSPORT_F_START 28 #define VIRTIO_TRANSPORT_F_END 32 /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? 
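 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The order in which a driver is expected to set these status bits during
 * probe.  add_status() and negotiate_features() are assumed helpers that OR
 * a bit into the device status and exchange feature bits via the transport.
 *
 *     add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);  // we have seen the device
 *     add_status(dev, VIRTIO_CONFIG_S_DRIVER);       // we know how to drive it
 *     negotiate_features(dev);                       // read host bits, ack a subset
 *     add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);    // device may now be used
 *     // on an unrecoverable probe error instead:
 *     add_status(dev, VIRTIO_CONFIG_S_FAILED);
 *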
*/ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/uapi/linux/virtio_ring.h000066400000000000000000000135341314037446600317720ustar00rootroot00000000000000#ifndef _UAPI_LINUX_VIRTIO_RING_H #define _UAPI_LINUX_VIRTIO_RING_H /* An interface for efficient virtio implementation, currently for use by KVM * and lguest, but hopefully others soon. Do NOT change this since it will * break existing servers and clients. * * This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of IBM nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Copyright Rusty Russell IBM Corporation 2007. */ #include /* This marks a buffer as continuing via the next field. */ #define VRING_DESC_F_NEXT 1 /* This marks a buffer as write-only (otherwise read-only). */ #define VRING_DESC_F_WRITE 2 /* This means the buffer contains a list of buffer descriptors. */ #define VRING_DESC_F_INDIRECT 4 /* The Host uses this in used->flags to advise the Guest: don't kick me when * you add a buffer. It's unreliable, so it's simply an optimization. Guest * will still kick if it's out of buffers. */ #define VRING_USED_F_NO_NOTIFY 1 /* The Guest uses this in avail->flags to advise the Host: don't interrupt me * when you consume a buffer. It's unreliable, so it's simply an * optimization. */ #define VRING_AVAIL_F_NO_INTERRUPT 1 /* We support indirect buffer descriptors */ #define VIRTIO_RING_F_INDIRECT_DESC 28 /* The Guest publishes the used index for which it expects an interrupt * at the end of the avail ring. Host should ignore the avail->flags field. */ /* The Host publishes the avail index for which it expects a kick * at the end of the used ring. Guest should ignore the used->flags field. */ #define VIRTIO_RING_F_EVENT_IDX 29 /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ __u64 addr; /* Length. */ __u32 len; /* The flags as indicated above. 
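 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * A two-descriptor chain built with the flags above: a driver-written
 * request followed by a device-written status byte.  "desc", "hdr_pa",
 * "hdr_len" and "status_pa" are assumed from the surrounding driver.
 *
 *     desc[0].addr  = hdr_pa;             // guest-physical address of the request
 *     desc[0].len   = hdr_len;
 *     desc[0].flags = VRING_DESC_F_NEXT;  // device reads this one, chain continues
 *     desc[0].next  = 1;
 *
 *     desc[1].addr  = status_pa;          // device writes a single status byte
 *     desc[1].len   = 1;
 *     desc[1].flags = VRING_DESC_F_WRITE; // write-only for the device, ends chain
 *     desc[1].next  = 0;
 *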
*/ __u16 flags; /* We chain unused descriptors via this, too */ __u16 next; }; struct vring_avail { __u16 flags; __u16 idx; __u16 ring[]; }; /* u32 is used here for ids for padding reasons. */ struct vring_used_elem { /* Index of start of used descriptor chain. */ __u32 id; /* Total length of the descriptor chain which was used (written to) */ __u32 len; }; struct vring_used { __u16 flags; __u16 idx; struct vring_used_elem ring[]; }; struct vring { unsigned int num; struct vring_desc *desc; struct vring_avail *avail; struct vring_used *used; }; /* The standard layout for the ring is a continuous chunk of memory which looks * like this. We assume num is a power of 2. * * struct vring * { * // The actual descriptors (16 bytes each) * struct vring_desc desc[num]; * * // A ring of available descriptor heads with free-running index. * __u16 avail_flags; * __u16 avail_idx; * __u16 available[num]; * __u16 used_event_idx; * * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. * __u16 used_flags; * __u16 used_idx; * struct vring_used_elem used[num]; * __u16 avail_event_idx; * }; */ /* We publish the used event index at the end of the available ring, and vice * versa. They are at the end for backwards compatibility. */ #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) #define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) static inline void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) { vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16) + align-1) & ~(align - 1)); } static inline unsigned vring_size(unsigned int num, unsigned long align) { return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + align - 1) & ~(align - 1)) + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; } /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ /* Assuming a given event_idx value from the other size, if * we have just incremented index from old to new_idx, * should we trigger an event? */ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) { /* Note: Xen has similar logic for notification hold-off * in include/xen/interface/io/ring.h with req_event and req_prod * corresponding to event_idx + 1 and new_idx respectively. * Note also that req_event and req_prod in Xen start at 1, * event indexes in virtio start at 0. */ return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); } #endif /* _UAPI_LINUX_VIRTIO_RING_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/000077500000000000000000000000001314037446600257555ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/Kbuild000066400000000000000000000000251314037446600271070ustar00rootroot00000000000000header-y += evtchn.h UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/blkif.h000066400000000000000000000317551314037446600272300ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. 
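 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * How a guest might use the event-index macros above to decide whether to
 * kick the host after adding buffers, assuming VIRTIO_RING_F_EVENT_IDX was
 * negotiated.  "vr", "vq", "notify", "old_idx" and "new_idx" are assumed.
 *
 *     // old_idx was avail->idx before this batch; new_idx is its value now,
 *     // already made visible to the host with a write barrier.
 *     if (vring_need_event(vring_avail_event(&vr), new_idx, old_idx))
 *             notify(vq);   // host asked to be kicked in (old_idx, new_idx]
 *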
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "interface/io/ring.h" #include "interface/grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ typedef uint16_t blkif_vdev_t; typedef uint64_t blkif_sector_t; /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature_barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. 
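 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * How a frontend might probe the backend's "feature-flush-cache" node before
 * issuing BLKIF_OP_FLUSH_DISKCACHE.  xenbus_scanf(), dev->otherend and
 * info->feature_flush follow the usual xenbus frontend pattern but are
 * assumptions here, not part of this interface.
 *
 *     int flush = 0;
 *     if (xenbus_scanf(XBT_NIL, dev->otherend,
 *                      "feature-flush-cache", "%d", &flush) != 1)
 *             flush = 0;                  // node absent: do not attempt flushes
 *     info->feature_flush = flush;        // gates use of BLKIF_OP_FLUSH_DISKCACHE
 *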
* The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. 
The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 #if 0 struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #endif struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
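 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The page-count rule above written out; SEGS_PER_INDIRECT_FRAME is an
 * illustrative name, DIV_ROUND_UP() and BUG_ON() are standard kernel macros.
 *
 *     #define SEGS_PER_INDIRECT_FRAME \
 *             (PAGE_SIZE / sizeof(struct blkif_request_segment_aligned)) // 512 on 4K pages
 *
 *     unsigned int nr_indirect_pages =
 *             DIV_ROUND_UP(nr_segments, SEGS_PER_INDIRECT_FRAME);        // ceil(n/512)
 *     BUG_ON(nr_indirect_pages > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 *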
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 /* Xen-defined major numbers for virtual disks, they look strangely * familiar */ #define XEN_IDE0_MAJOR 3 #define XEN_IDE1_MAJOR 22 #define XEN_SCSI_DISK0_MAJOR 8 #define XEN_SCSI_DISK1_MAJOR 65 #define XEN_SCSI_DISK2_MAJOR 66 #define XEN_SCSI_DISK3_MAJOR 67 #define XEN_SCSI_DISK4_MAJOR 68 #define XEN_SCSI_DISK5_MAJOR 69 #define XEN_SCSI_DISK6_MAJOR 70 #define XEN_SCSI_DISK7_MAJOR 71 #define XEN_SCSI_DISK8_MAJOR 128 #define XEN_SCSI_DISK9_MAJOR 129 #define XEN_SCSI_DISK10_MAJOR 130 #define XEN_SCSI_DISK11_MAJOR 131 #define XEN_SCSI_DISK12_MAJOR 132 #define XEN_SCSI_DISK13_MAJOR 133 #define XEN_SCSI_DISK14_MAJOR 134 #define XEN_SCSI_DISK15_MAJOR 135 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/events.h000066400000000000000000000040671314037446600274410ustar00rootroot00000000000000#ifndef _XEN_EVENTS_H #define _XEN_EVENTS_H #include #include #include #include int bind_evtchn_to_irq(unsigned int evtchn); int bind_evtchn_to_irqhandler(unsigned int evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id); /* * Common unbind function for all event sources. 
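 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * Typical frontend use of the helpers declared in this file: bind the event
 * channel learned from xenstore to an IRQ with a handler, and unbind on
 * teardown.  "blkif_interrupt", "evtchn" and "info" are assumed names.
 *
 *     int irq = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *                                         0, "blkif", info);
 *     if (irq < 0)
 *             return irq;                 // binding failed
 *     info->irq = irq;
 *     // later, on disconnect (this also closes the event channel):
 *     unbind_from_irqhandler(info->irq, info);
 *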
Takes IRQ to unbind from. * Automatically closes the underlying event channel (even for bindings * made with bind_evtchn_to_irqhandler()). */ void unbind_from_irqhandler(unsigned int irq, void *dev_id); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); int resend_irq_on_evtchn(unsigned int irq); void rebind_evtchn_irq(int evtchn, int irq); static inline void notify_remote_via_evtchn(int port) { struct evtchn_send send = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send); } extern void notify_remote_via_irq(int irq); extern void xen_irq_resume(void); /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); void xen_set_irq_pending(int irq); bool xen_test_irq_pending(int irq); /* Poll waiting for an irq to become pending. In the usual case, the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq); /* Determine the IRQ which is bound to an event channel */ unsigned irq_from_evtchn(unsigned int evtchn); /* Xen HVM evtchn vector callback */ extern void xen_hvm_callback_vector(void); extern int xen_have_vector_callback; int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); #endif /* _XEN_EVENTS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/evtchn.h000066400000000000000000000056271314037446600274270ustar00rootroot00000000000000/****************************************************************************** * evtchn.h * * Interface to /dev/xen/evtchn. * * Copyright (c) 2003-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __LINUX_PUBLIC_EVTCHN_H__ #define __LINUX_PUBLIC_EVTCHN_H__ /* * Bind a fresh port to VIRQ @virq. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_VIRQ \ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) struct ioctl_evtchn_bind_virq { unsigned int virq; }; /* * Bind a fresh port to remote <@remote_domain, @remote_port>. * Return allocated port. 
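 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * A userspace consumer of /dev/xen/evtchn binding to a VIRQ with the ioctl
 * above.  The read/write convention (4-byte port numbers; write a port back
 * to unmask it) and the usual fcntl/ioctl headers are assumptions here.
 *
 *     int fd = open("/dev/xen/evtchn", O_RDWR);
 *     struct ioctl_evtchn_bind_virq bind = { .virq = virq };
 *     int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind); // returns the local port
 *
 *     unsigned int fired;
 *     read(fd, &fired, sizeof(fired));    // blocks until an event arrives
 *     write(fd, &fired, sizeof(fired));   // unmask the port to receive more
 *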
*/ #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) struct ioctl_evtchn_bind_interdomain { unsigned int remote_domain, remote_port; }; /* * Allocate a fresh port for binding to @remote_domain. * Return allocated port. */ #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) struct ioctl_evtchn_bind_unbound_port { unsigned int remote_domain; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_UNBIND \ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) struct ioctl_evtchn_unbind { unsigned int port; }; /* * Unbind previously allocated @port. */ #define IOCTL_EVTCHN_NOTIFY \ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) struct ioctl_evtchn_notify { unsigned int port; }; /* Clear and reinitialise the event buffer. Clear error condition. */ #define IOCTL_EVTCHN_RESET \ _IOC(_IOC_NONE, 'E', 5, 0) #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/features.h000066400000000000000000000007171314037446600277510ustar00rootroot00000000000000/****************************************************************************** * features.h * * Query the features reported by Xen. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_FEATURES_H__ #define __XEN_FEATURES_H__ #include void xen_setup_features(void); extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32]; static inline int xen_feature(int flag) { return xen_features[flag]; } #endif /* __ASM_XEN_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/grant_table.h000066400000000000000000000106301314037446600304100ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Two sets of functionality: * 1. Granting foreign access to our memory reservation. * 2. Accessing others' memory reservations via grant references. * (i.e., mechanisms for both sender and recipient of grant references) * * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
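 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The usual guard built on xen_feature() from xen/features.h above;
 * XENFEAT_auto_translated_physmap is defined in the interface headers
 * elsewhere in this tree.
 *
 *     xen_setup_features();               // populate xen_features[] once at boot
 *     if (xen_feature(XENFEAT_auto_translated_physmap))
 *             return 0;    // hypervisor translates for us; skip pfn<->mfn fixups
 *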
*/ #ifndef __ASM_GNTTAB_H__ #define __ASM_GNTTAB_H__ #include #include #include /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ #define NR_GRANT_FRAMES 4 struct gnttab_free_callback { struct gnttab_free_callback *next; void (*fn)(void *); void *arg; u16 count; }; int gnttab_init(void); int gnttab_suspend(void); int gnttab_resume(void); int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly); /* * End access through the given grant reference, iff the grant entry is no * longer in use. Return 1 if the grant entry was freed, 0 if it is still in * use. */ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); /* * Eventually end access through the given grant reference, and once that * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. */ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); int gnttab_query_foreign_access(grant_ref_t ref); /* * operations on reserved batches of grant references */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); void gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t release); void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count); void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, struct grant_entry **__shared); void arch_gnttab_unmap_shared(struct grant_entry *shared, unsigned long nr_gframes); extern unsigned long xen_hvm_resume_frames; unsigned int gnttab_max_grant_frames(void); #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) #endif /* __ASM_GNTTAB_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/hvc-console.h000066400000000000000000000007011314037446600303440ustar00rootroot00000000000000#ifndef XEN_HVC_CONSOLE_H #define XEN_HVC_CONSOLE_H extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN void xen_console_resume(void); void xen_raw_console_write(const char *str); void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } static inline void xen_raw_printk(const char *fmt, ...) 
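/*
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The grant life-cycle declared in xen/grant_table.h above: a frontend grants
 * a page to the backend domain, hands the reference over (e.g. via xenstore),
 * and revokes it on teardown.  "vaddr" (the page's kernel virtual address),
 * "backend_id", virt_to_mfn() and the error handling are assumptions here.
 *
 *     int ref = gnttab_grant_foreign_access(backend_id, virt_to_mfn(vaddr), 0);
 *     if (ref < 0)
 *             goto fail;                      // out of grant references
 *     // ...publish ref to the backend, run the ring...
 *     gnttab_end_foreign_access(ref, 0, vaddr);  // revoke access and free the page
 */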
{ } #endif #endif /* XEN_HVC_CONSOLE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/hvm.h000066400000000000000000000012671314037446600267260ustar00rootroot00000000000000/* Simple wrappers around HVM functions */ #ifndef XEN_HVM_H__ #define XEN_HVM_H__ #include #include static inline int hvm_get_parameter(int idx, uint64_t *value) { struct xen_hvm_param xhv; int r; xhv.domid = DOMID_SELF; xhv.index = idx; r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv); if (r < 0) { printk(KERN_ERR "cannot get hvm parameter %d: %d.\n", idx, r); return r; } *value = xhv.value; return r; } #define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2 #define HVM_CALLBACK_VIA_TYPE_SHIFT 56 #define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) #endif /* XEN_HVM_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/000077500000000000000000000000001314037446600277155ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/callback.h000066400000000000000000000067161314037446600316340ustar00rootroot00000000000000/****************************************************************************** * callback.h * * Register guest OS callbacks with Xen. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2006, Ian Campbell */ #ifndef __XEN_PUBLIC_CALLBACK_H__ #define __XEN_PUBLIC_CALLBACK_H__ #include "xen.h" /* * Prototype for this hypercall is: * long callback_op(int cmd, void *extra_args) * @cmd == CALLBACKOP_??? (callback operation). * @extra_args == Operation-specific extra arguments (NULL if none). */ /* ia64, x86: Callback for event delivery. */ #define CALLBACKTYPE_event 0 /* x86: Failsafe callback when guest state cannot be restored by Xen. */ #define CALLBACKTYPE_failsafe 1 /* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */ #define CALLBACKTYPE_syscall 2 /* * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel * feature is enabled. Do not use this callback type in new code. */ #define CALLBACKTYPE_sysenter_deprecated 3 /* x86: Callback for NMI delivery. */ #define CALLBACKTYPE_nmi 4 /* * x86: sysenter is only available as follows: * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs * ('32-on-32-on-64', '32-on-64-on-64') * [nb. 
also 64-bit guest applications on Intel CPUs * ('64-on-64-on-64'), but syscall is preferred] */ #define CALLBACKTYPE_sysenter 5 /* * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs * ('32-on-32-on-64', '32-on-64-on-64') */ #define CALLBACKTYPE_syscall32 7 /* * Disable event deliver during callback? This flag is ignored for event and * NMI callbacks: event delivery is unconditionally disabled. */ #define _CALLBACKF_mask_events 0 #define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events) /* * Register a callback. */ #define CALLBACKOP_register 0 struct callback_register { uint16_t type; uint16_t flags; xen_callback_t address; }; /* * Unregister a callback. * * Not all callbacks can be unregistered. -EINVAL will be returned if * you attempt to unregister such a callback. */ #define CALLBACKOP_unregister 1 struct callback_unregister { uint16_t type; uint16_t _unused; }; #endif /* __XEN_PUBLIC_CALLBACK_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/elfnote.h000066400000000000000000000074131314037446600315270ustar00rootroot00000000000000/****************************************************************************** * elfnote.h * * Definitions used for the Xen ELF notes. * * Copyright (c) 2006, Ian Campbell, XenSource Ltd. */ #ifndef __XEN_PUBLIC_ELFNOTE_H__ #define __XEN_PUBLIC_ELFNOTE_H__ /* * The notes should live in a SHT_NOTE segment and have "Xen" in the * name field. * * Numeric types are either 4 or 8 bytes depending on the content of * the desc field. * * LEGACY indicated the fields in the legacy __xen_guest string which * this a note type replaces. */ /* * NAME=VALUE pair (string). * * LEGACY: FEATURES and PAE */ #define XEN_ELFNOTE_INFO 0 /* * The virtual address of the entry point (numeric). * * LEGACY: VIRT_ENTRY */ #define XEN_ELFNOTE_ENTRY 1 /* The virtual address of the hypercall transfer page (numeric). * * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page * number not a virtual address) */ #define XEN_ELFNOTE_HYPERCALL_PAGE 2 /* The virtual address where the kernel image should be mapped (numeric). * * Defaults to 0. * * LEGACY: VIRT_BASE */ #define XEN_ELFNOTE_VIRT_BASE 3 /* * The offset of the ELF paddr field from the acutal required * psuedo-physical address (numeric). * * This is used to maintain backwards compatibility with older kernels * which wrote __PAGE_OFFSET into that field. This field defaults to 0 * if not present. * * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE) */ #define XEN_ELFNOTE_PADDR_OFFSET 4 /* * The version of Xen that we work with (string). * * LEGACY: XEN_VER */ #define XEN_ELFNOTE_XEN_VERSION 5 /* * The name of the guest operating system (string). * * LEGACY: GUEST_OS */ #define XEN_ELFNOTE_GUEST_OS 6 /* * The version of the guest operating system (string). * * LEGACY: GUEST_VER */ #define XEN_ELFNOTE_GUEST_VERSION 7 /* * The loader type (string). * * LEGACY: LOADER */ #define XEN_ELFNOTE_LOADER 8 /* * The kernel supports PAE (x86/32 only, string = "yes" or "no"). * * LEGACY: PAE (n.b. The legacy interface included a provision to * indicate 'extended-cr3' support allowing L3 page tables to be * placed above 4G. It is assumed that any kernel new enough to use * these ELF notes will include this and therefore "yes" here is * equivalent to "yes[entended-cr3]" in the __xen_guest interface. */ #define XEN_ELFNOTE_PAE_MODE 9 /* * The features supported/required by this kernel (string). 
* * The string must consist of a list of feature names (as given in * features.h, without the "XENFEAT_" prefix) separated by '|' * characters. If a feature is required for the kernel to function * then the feature name must be preceded by a '!' character. * * LEGACY: FEATURES */ #define XEN_ELFNOTE_FEATURES 10 /* * The kernel requires the symbol table to be loaded (string = "yes" or "no") * LEGACY: BSD_SYMTAB (n.b. The legacy treated the presence or absence * of this string as a boolean flag rather than requiring "yes" or * "no". */ #define XEN_ELFNOTE_BSD_SYMTAB 11 /* * The lowest address the hypervisor hole can begin at (numeric). * * This must not be set higher than HYPERVISOR_VIRT_START. Its presence * also indicates to the hypervisor that the kernel can deal with the * hole starting at a higher address. */ #define XEN_ELFNOTE_HV_START_LOW 12 /* * List of maddr_t-sized mask/value pairs describing how to recognize * (non-present) L1 page table entries carrying valid MFNs (numeric). */ #define XEN_ELFNOTE_L1_MFN_VALID 13 /* * Whether or not the guest supports cooperative suspend cancellation. */ #define XEN_ELFNOTE_SUSPEND_CANCEL 14 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ event_channel.h000066400000000000000000000132111314037446600326160ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/****************************************************************************** * event_channel.h * * Event channels between domains. * * Copyright (c) 2003-2004, K A Fraser. */ #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__ #define __XEN_PUBLIC_EVENT_CHANNEL_H__ #include typedef uint32_t evtchn_port_t; DEFINE_GUEST_HANDLE(evtchn_port_t); /* * EVTCHNOP_alloc_unbound: Allocate a port in domain and mark as * accepting interdomain bindings from domain . A fresh port * is allocated in and returned as . * NOTES: * 1. If the caller is unprivileged then must be DOMID_SELF. * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_alloc_unbound 6 struct evtchn_alloc_unbound { /* IN parameters */ domid_t dom, remote_dom; /* OUT parameters */ evtchn_port_t port; }; /* * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between * the calling domain and . must identify * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . * NOTES: * 2. may be DOMID_SELF, allowing loopback connections. */ #define EVTCHNOP_bind_interdomain 0 struct evtchn_bind_interdomain { /* IN parameters. */ domid_t remote_dom; evtchn_port_t remote_port; /* OUT parameters. */ evtchn_port_t local_port; }; /* * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ on specified * vcpu. * NOTES: * 1. A virtual IRQ may be bound to at most one event channel per vcpu. * 2. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_virq 1 struct evtchn_bind_virq { /* IN parameters. */ uint32_t virq; uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; /* * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ . * NOTES: * 1. A physical IRQ may be bound to at most one event channel per domain. * 2. Only a sufficiently-privileged domain may bind to a physical IRQ. */ #define EVTCHNOP_bind_pirq 2 struct evtchn_bind_pirq { /* IN parameters. 
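 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * A backend-style allocation of an unbound port for a peer domain using the
 * structure above.  HYPERVISOR_event_channel_op() is the hypercall wrapper
 * already used elsewhere in this tree; "frontend_id" and "publish_port" are
 * assumed.
 *
 *     struct evtchn_alloc_unbound alloc = {
 *             .dom        = DOMID_SELF,    // allocate the port in our domain
 *             .remote_dom = frontend_id,   // peer allowed to bind to it
 *     };
 *     int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
 *     if (!err)
 *             publish_port(alloc.port);    // e.g. write it to xenstore for the peer
 *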
*/ uint32_t pirq; #define BIND_PIRQ__WILL_SHARE 1 uint32_t flags; /* BIND_PIRQ__* */ /* OUT parameters. */ evtchn_port_t port; }; /* * EVTCHNOP_bind_ipi: Bind a local event channel to receive events. * NOTES: * 1. The allocated event channel is bound to the specified vcpu. The binding * may not be changed. */ #define EVTCHNOP_bind_ipi 7 struct evtchn_bind_ipi { uint32_t vcpu; /* OUT parameters. */ evtchn_port_t port; }; /* * EVTCHNOP_close: Close a local event channel . If the channel is * interdomain then the remote end is placed in the unbound state * (EVTCHNSTAT_unbound), awaiting a new connection. */ #define EVTCHNOP_close 3 struct evtchn_close { /* IN parameters. */ evtchn_port_t port; }; /* * EVTCHNOP_send: Send an event to the remote end of the channel whose local * endpoint is . */ #define EVTCHNOP_send 4 struct evtchn_send { /* IN parameters. */ evtchn_port_t port; }; /* * EVTCHNOP_status: Get the current status of the communication channel which * has an endpoint at . * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may obtain the status of an event * channel for which is not DOMID_SELF. */ #define EVTCHNOP_status 5 struct evtchn_status { /* IN parameters */ domid_t dom; evtchn_port_t port; /* OUT parameters */ #define EVTCHNSTAT_closed 0 /* Channel is not in use. */ #define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/ #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */ #define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */ #define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */ #define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */ uint32_t status; uint32_t vcpu; /* VCPU to which this channel is bound. */ union { struct { domid_t dom; } unbound; /* EVTCHNSTAT_unbound */ struct { domid_t dom; evtchn_port_t port; } interdomain; /* EVTCHNSTAT_interdomain */ uint32_t pirq; /* EVTCHNSTAT_pirq */ uint32_t virq; /* EVTCHNSTAT_virq */ } u; }; /* * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised * the binding. This binding cannot be changed. * 2. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). */ #define EVTCHNOP_bind_vcpu 8 struct evtchn_bind_vcpu { /* IN parameters. */ evtchn_port_t port; uint32_t vcpu; }; /* * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver * a notification to the appropriate VCPU if an event is pending. */ #define EVTCHNOP_unmask 9 struct evtchn_unmask { /* IN parameters. 
*/ evtchn_port_t port; }; struct evtchn_op { uint32_t cmd; /* EVTCHNOP_* */ union { struct evtchn_alloc_unbound alloc_unbound; struct evtchn_bind_interdomain bind_interdomain; struct evtchn_bind_virq bind_virq; struct evtchn_bind_pirq bind_pirq; struct evtchn_bind_ipi bind_ipi; struct evtchn_close close; struct evtchn_send send; struct evtchn_status status; struct evtchn_bind_vcpu bind_vcpu; struct evtchn_unmask unmask; } u; }; DEFINE_GUEST_HANDLE_STRUCT(evtchn_op); #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/features.h000066400000000000000000000030441314037446600317050ustar00rootroot00000000000000/****************************************************************************** * features.h * * Feature flags, reported by XENVER_get_features. * * Copyright (c) 2006, Keir Fraser */ #ifndef __XEN_PUBLIC_FEATURES_H__ #define __XEN_PUBLIC_FEATURES_H__ /* * If set, the guest does not need to write-protect its pagetables, and can * update them via direct writes. */ #define XENFEAT_writable_page_tables 0 /* * If set, the guest does not need to write-protect its segment descriptor * tables, and can update them via direct writes. */ #define XENFEAT_writable_descriptor_tables 1 /* * If set, translation between the guest's 'pseudo-physical' address space * and the host's machine address space are handled by the hypervisor. In this * mode the guest does not need to perform phys-to/from-machine translations * when performing page table operations. */ #define XENFEAT_auto_translated_physmap 2 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ #define XENFEAT_supervisor_mode_kernel 3 /* * If set, the guest does not need to allocate x86 PAE page directories * below 4GB. This flag is usually implied by auto_translated_physmap. */ #define XENFEAT_pae_pgdir_above_4gb 4 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */ #define XENFEAT_mmu_pt_update_preserve_ad 5 /* x86: Does this Xen host support the HVM callback vector type? */ #define XENFEAT_hvm_callback_vector 8 #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/grant_table.h000066400000000000000000000345301314037446600323550ustar00rootroot00000000000000/****************************************************************************** * grant_table.h * * Interface for granting foreign access to page frames, and receiving * page-ownership transfers. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_GRANT_TABLE_H__ #define __XEN_PUBLIC_GRANT_TABLE_H__ #include /*********************************** * GRANT TABLE REPRESENTATION */ /* Some rough guidelines on accessing and updating grant-table entries * in a concurrency-safe manner. For more information, Linux contains a * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). * * NB. WMB is a no-op on current-generation x86 processors. However, a * compiler barrier will still be required. * * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. Write ent->frame: * GTF_permit_access: Frame to which access is permitted. * GTF_accept_transfer: Pseudo-phys frame slot being filled by new * frame, or zero if none. * 3. Write memory barrier (WMB). * 4. Write ent->flags, inc. valid type. * * Invalidating an unused GTF_permit_access entry: * 1. flags = ent->flags. * 2. Observe that !(flags & (GTF_reading|GTF_writing)). * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * * Invalidating an in-use GTF_permit_access entry: * This cannot be done directly. Request assistance from the domain controller * which can set a timeout on the use of a grant entry and take necessary * action. (NB. This is not yet implemented!). * * Invalidating an unused GTF_accept_transfer entry: * 1. flags = ent->flags. * 2. Observe that !(flags & GTF_transfer_committed). [*] * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0). * NB. No need for WMB as reuse of entry is control-dependent on success of * step 3, and all architectures guarantee ordering of ctrl-dep writes. * [*] If GTF_transfer_committed is set then the grant entry is 'committed'. * The guest must /not/ modify the grant entry until the address of the * transferred frame is written. It is safe for the guest to spin waiting * for this to occur (detect by observing GTF_transfer_completed in * ent->flags). * * Invalidating a committed GTF_accept_transfer entry: * 1. Wait for (ent->flags & GTF_transfer_completed). * * Changing a GTF_permit_access from writable to read-only: * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing. * * Changing a GTF_permit_access from read-only to writable: * Use SMP-safe bit-setting instruction. */ /* * A grant table comprises a packed array of grant entries in one or more * page frames shared between Xen and a guest. * [XEN]: This field is written by Xen and read by the sharing guest. * [GST]: This field is written by the guest and read by Xen. */ struct grant_entry { /* GTF_xxx: various type and flag information. [XEN,GST] */ uint16_t flags; /* The domain being granted foreign privileges. [GST] */ domid_t domid; /* * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] */ uint32_t frame; }; /* * Type of grant entry. * GTF_invalid: This grant entry grants no privileges. * GTF_permit_access: Allow @domid to map/access @frame. * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame * to this guest. 
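 *
 * [Editor's note - illustrative sketch, not part of the original header.]
 * The "introducing a valid entry" recipe above, spelled out against struct
 * grant_entry.  "shared" is the guest's mapped grant table; "ref", "domid"
 * and "frame" are assumed.  OR in GTF_readonly for a read-only grant.
 *
 *     shared[ref].domid = domid;              // step 1
 *     shared[ref].frame = frame;              // step 2: frame being granted
 *     wmb();                                  // step 3: order the above before flags
 *     shared[ref].flags = GTF_permit_access;  // step 4: the entry becomes valid
 *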
Xen writes the page number to @frame. */ #define GTF_invalid (0U<<0) #define GTF_permit_access (1U<<0) #define GTF_accept_transfer (2U<<0) #define GTF_type_mask (3U<<0) /* * Subflags for GTF_permit_access. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] */ #define _GTF_readonly (2) #define GTF_readonly (1U<<_GTF_readonly) #define _GTF_reading (3) #define GTF_reading (1U<<_GTF_reading) #define _GTF_writing (4) #define GTF_writing (1U<<_GTF_writing) /* * Subflags for GTF_accept_transfer: * GTF_transfer_committed: Xen sets this flag to indicate that it is committed * to transferring ownership of a page frame. When a guest sees this flag * it must /not/ modify the grant entry until GTF_transfer_completed is * set by Xen. * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag * after reading GTF_transfer_committed. Xen will always write the frame * address, followed by ORing this flag, in a timely manner. */ #define _GTF_transfer_committed (2) #define GTF_transfer_committed (1U<<_GTF_transfer_committed) #define _GTF_transfer_completed (3) #define GTF_transfer_completed (1U<<_GTF_transfer_completed) /*********************************** * GRANT TABLE QUERIES AND USES */ /* * Reference to a grant entry in a specified domain's grant table. */ typedef uint32_t grant_ref_t; /* * Handle to track a mapping created via a grant reference. */ typedef uint32_t grant_handle_t; /* * GNTTABOP_map_grant_ref: Map the grant entry (,) for access * by devices and/or host CPUs. If successful, is a tracking number * that must be presented later to destroy the mapping(s). On error, * is a negative status code. * NOTES: * 1. If GNTMAP_device_map is specified then is the address * via which I/O devices may access the granted frame. * 2. If GNTMAP_host_map is specified then a mapping will be added at * either a host virtual address in the current address space, or at * a PTE at the specified machine address. The type of mapping to * perform is selected through the GNTMAP_contains_pte flag, and the * address is specified in . * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a * host mapping is destroyed by other means then it is *NOT* guaranteed * to be accounted to the correct grant reference! */ #define GNTTABOP_map_grant_ref 0 struct gnttab_map_grant_ref { /* IN parameters. */ uint64_t host_addr; uint32_t flags; /* GNTMAP_* */ grant_ref_t ref; domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ grant_handle_t handle; uint64_t dev_bus_addr; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref); /* * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings * tracked by . If or is zero, that * field is ignored. If non-zero, they must refer to a device/host mapping * that is tracked by * NOTES: * 1. The call may fail in an undefined manner if either mapping is not * tracked by . * 3. After executing a batch of unmaps, it is guaranteed that no stale * mappings will remain in the device or host TLBs. */ #define GNTTABOP_unmap_grant_ref 1 struct gnttab_unmap_grant_ref { /* IN parameters. */ uint64_t host_addr; uint64_t dev_bus_addr; grant_handle_t handle; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref); /* * GNTTABOP_setup_table: Set up a grant table for comprising at least * pages. The frame addresses are written to the . 
* Only addresses are written, even if the table is larger. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. * 3. Xen may not support more than a single grant-table page per domain. */ #define GNTTABOP_setup_table 2 struct gnttab_setup_table { /* IN parameters. */ domid_t dom; uint32_t nr_frames; /* OUT parameters. */ int16_t status; /* GNTST_* */ GUEST_HANDLE(ulong) frame_list; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table); /* * GNTTABOP_dump_table: Dump the contents of the grant table to the * xen console. Debugging use only. */ #define GNTTABOP_dump_table 3 struct gnttab_dump_table { /* IN parameters. */ domid_t dom; /* OUT parameters. */ int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table); /* * GNTTABOP_transfer_grant_ref: Transfer to a foreign domain. The * foreign domain has previously registered its interest in the transfer via * . * * Note that, even if the transfer fails, the specified page no longer belongs * to the calling domain *unless* the error is GNTST_bad_page. */ #define GNTTABOP_transfer 4 struct gnttab_transfer { /* IN parameters. */ unsigned long mfn; domid_t domid; grant_ref_t ref; /* OUT parameters. */ int16_t status; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer); /* * GNTTABOP_copy: Hypervisor based copy * source and destinations can be eithers MFNs or, for foreign domains, * grant references. the foreign domain has to grant read/write access * in its grant table. * * The flags specify what type source and destinations are (either MFN * or grant reference). * * Note that this can also be used to copy data between two domains * via a third party if the source and destination domains had previously * grant appropriate access to their pages to the third party. * * source_offset specifies an offset in the source frame, dest_offset * the offset in the target frame and len specifies the number of * bytes to be copied. */ #define _GNTCOPY_source_gref (0) #define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref) #define _GNTCOPY_dest_gref (1) #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) #define GNTTABOP_copy 5 struct gnttab_copy { /* IN parameters. */ struct { union { grant_ref_t ref; unsigned long gmfn; } u; domid_t domid; uint16_t offset; } source, dest; uint16_t len; uint16_t flags; /* GNTCOPY_* */ /* OUT parameters. */ int16_t status; }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy); /* * GNTTABOP_query_size: Query the current and maximum sizes of the shared * grant table. * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify != DOMID_SELF. */ #define GNTTABOP_query_size 6 struct gnttab_query_size { /* IN parameters. */ domid_t dom; /* OUT parameters. */ uint32_t nr_frames; uint32_t max_nr_frames; int16_t status; /* GNTST_* */ }; DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size); /* * Bitfield values for update_pin_status.flags. */ /* Map the grant entry for access by I/O devices. */ #define _GNTMAP_device_map (0) #define GNTMAP_device_map (1<<_GNTMAP_device_map) /* Map the grant entry for access by host CPUs. */ #define _GNTMAP_host_map (1) #define GNTMAP_host_map (1<<_GNTMAP_host_map) /* Accesses to the granted frame will be restricted to read-only access. */ #define _GNTMAP_readonly (2) #define GNTMAP_readonly (1<<_GNTMAP_readonly) /* * GNTMAP_host_map subflag: * 0 => The host mapping is usable only by the guest OS. * 1 => The host mapping is usable by guest OS + current application. 
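 *
 * As an illustration of how these GNTMAP_* flags fit together with the
 * structures above, a minimal sketch of mapping a foreign frame read-only
 * into the host address space (assuming the guest kernel's
 * HYPERVISOR_grant_table_op wrapper; "vaddr", "gref" and "otherend_id" are
 * placeholders, and error handling is omitted):
 *
 *   struct gnttab_map_grant_ref op = {
 *       .host_addr = vaddr,
 *       .flags     = GNTMAP_host_map | GNTMAP_readonly,
 *       .ref       = gref,
 *       .dom       = otherend_id,
 *   };
 *   if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ||
 *       op.status != GNTST_okay)
 *       return -EFAULT;
 *   saved_handle = op.handle;
 *
 * The returned op.handle must be kept and later passed to
 * GNTTABOP_unmap_grant_ref when the mapping is torn down, as noted in the
 * GNTTABOP_map_grant_ref description above.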
*/ #define _GNTMAP_application_map (3) #define GNTMAP_application_map (1<<_GNTMAP_application_map) /* * GNTMAP_contains_pte subflag: * 0 => This map request contains a host virtual address. * 1 => This map request contains the machine addess of the PTE to update. */ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) /* * Values for error status returns. All errors are -ve. */ #define GNTST_okay (0) /* Normal return. */ #define GNTST_general_error (-1) /* General undefined error. */ #define GNTST_bad_domain (-2) /* Unrecognsed domain id. */ #define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */ #define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */ #define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */ #define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/ #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */ #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */ #define GNTST_bad_page (-9) /* Specified page was invalid for op. */ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */ #define GNTTABOP_error_msgs { \ "okay", \ "undefined error", \ "unrecognised domain id", \ "invalid grant reference", \ "invalid mapping handle", \ "invalid virtual address", \ "invalid device address", \ "no spare translation slot in the I/O MMU", \ "permission denied", \ "bad page", \ "copy arguments cross page boundary" \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/hvm/000077500000000000000000000000001314037446600305075ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/hvm/hvm_op.h000066400000000000000000000030071314037446600321500ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ /* Get/set subcommands: the second argument of the hypercall is a * pointer to a xen_hvm_param struct. 
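 *
 * For example, a minimal sketch of reading one parameter (assuming the
 * guest's HYPERVISOR_hvm_op wrapper and the HVM_PARAM_STORE_EVTCHN index
 * from params.h; "store_evtchn" is a placeholder):
 *
 *   struct xen_hvm_param xhv = {
 *       .domid = DOMID_SELF,
 *       .index = HVM_PARAM_STORE_EVTCHN,
 *   };
 *   if (HYPERVISOR_hvm_op(HVMOP_get_param, &xhv) == 0)
 *       store_evtchn = xhv.value;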
*/ #define HVMOP_set_param 0 #define HVMOP_get_param 1 struct xen_hvm_param { domid_t domid; /* IN */ uint32_t index; /* IN */ uint64_t value; /* IN/OUT */ }; DEFINE_GUEST_HANDLE_STRUCT(xen_hvm_param); #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/hvm/params.h000066400000000000000000000071361314037446600321520ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_HVM_PARAMS_H__ #define __XEN_PUBLIC_HVM_PARAMS_H__ #include "hvm_op.h" /* * Parameter space for HVMOP_{set,get}_param. */ /* * How should CPU0 event-channel notifications be delivered? * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt). * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows: * Domain = val[47:32], Bus = val[31:16], * DevFn = val[15: 8], IntX = val[ 1: 0] * val[63:56] == 2: val[7:0] is a vector number. * If val == 0 then CPU0 event-channel notifications are not delivered. */ #define HVM_PARAM_CALLBACK_IRQ 0 #define HVM_PARAM_STORE_PFN 1 #define HVM_PARAM_STORE_EVTCHN 2 #define HVM_PARAM_PAE_ENABLED 4 #define HVM_PARAM_IOREQ_PFN 5 #define HVM_PARAM_BUFIOREQ_PFN 6 /* * Set mode for virtual timers (currently x86 only): * delay_for_missed_ticks (default): * Do not advance a vcpu's time beyond the correct delivery time for * interrupts that have been missed due to preemption. Deliver missed * interrupts when the vcpu is rescheduled and advance the vcpu's virtual * time stepwise for each one. * no_delay_for_missed_ticks: * As above, missed interrupts are delivered, but guest time always tracks * wallclock (i.e., real) time while doing so. * no_missed_ticks_pending: * No missed interrupts are held pending. Instead, to ensure ticks are * delivered at some non-zero rate, if we detect missed ticks then the * internal tick alarm is not disabled if the VCPU is preempted during the * next tick period. * one_missed_tick_pending: * Missed interrupts are collapsed together and delivered as one 'late tick'. * Guest time always tracks wallclock (i.e., real) time. */ #define HVM_PARAM_TIMER_MODE 10 #define HVMPTM_delay_for_missed_ticks 0 #define HVMPTM_no_delay_for_missed_ticks 1 #define HVMPTM_no_missed_ticks_pending 2 #define HVMPTM_one_missed_tick_pending 3 /* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */ #define HVM_PARAM_HPET_ENABLED 11 /* Identity-map page directory used by Intel EPT when CR0.PG=0. 
*/ #define HVM_PARAM_IDENT_PT 12 /* Device Model domain, defaults to 0. */ #define HVM_PARAM_DM_DOMAIN 13 /* ACPI S state: currently support S0 and S3 on x86. */ #define HVM_PARAM_ACPI_S_STATE 14 /* TSS used on Intel when CR0.PE=0. */ #define HVM_PARAM_VM86_TSS 15 /* Boolean: Enable aligning all periodic vpts to reduce interrupts */ #define HVM_PARAM_VPT_ALIGN 16 #define HVM_NR_PARAMS 17 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/000077500000000000000000000000001314037446600303245ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/blkif.h000066400000000000000000000317311314037446600315710ustar00rootroot00000000000000/****************************************************************************** * blkif.h * * Unified block-device I/O interface for Xen guest OSes. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_BLKIF_H__ #define __XEN_PUBLIC_IO_BLKIF_H__ #include "ring.h" #include "../grant_table.h" /* * Front->back notifications: When enqueuing a new request, sending a * notification can be made conditional on req_event (i.e., the generic * hold-off mechanism provided by the ring macros). Backends must set * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()). * * Back->front notifications: When enqueuing a new response, sending a * notification can be made conditional on rsp_event (i.e., the generic * hold-off mechanism provided by the ring macros). Frontends must set * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()). */ typedef uint16_t blkif_vdev_t; typedef uint64_t blkif_sector_t; /* * REQUEST CODES. */ #define BLKIF_OP_READ 0 #define BLKIF_OP_WRITE 1 /* * Recognised only if "feature-barrier" is present in backend xenbus info. * The "feature_barrier" node contains a boolean indicating whether barrier * requests are likely to succeed or fail. Either way, a barrier request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt barrier requests. * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not* * create the "feature-barrier" node! */ #define BLKIF_OP_WRITE_BARRIER 2 /* * Recognised if "feature-flush-cache" is present in backend xenbus * info. 
A flush will ask the underlying storage hardware to flush its * non-volatile caches as appropriate. The "feature-flush-cache" node * contains a boolean indicating whether flush requests are likely to * succeed or fail. Either way, a flush request may fail at any time * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying * block-device hardware. The boolean simply indicates whether or not it * is worthwhile for the frontend to attempt flushes. If a backend does * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the * "feature-flush-cache" node! */ #define BLKIF_OP_FLUSH_DISKCACHE 3 /* * Device specific command packet contained within the request */ #define BLKIF_OP_PACKET 4 /* * Recognised only if "feature-discard" is present in backend xenbus info. * The "feature-discard" node contains a boolean indicating whether trim * (ATA) or unmap (SCSI) - conviently called discard requests are likely * to succeed or fail. Either way, a discard request * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by * the underlying block-device hardware. The boolean simply indicates whether * or not it is worthwhile for the frontend to attempt discard requests. * If a backend does not recognise BLKIF_OP_DISCARD, it should *not* * create the "feature-discard" node! * * Discard operation is a request for the underlying block device to mark * extents to be erased. However, discard does not guarantee that the blocks * will be erased from the device - it is just a hint to the device * controller that these blocks are no longer in use. What the device * controller does with that information is left to the controller. * Discard operations are passed with sector_number as the * sector index to begin discard operations at and nr_sectors as the number of * sectors to be discarded. The specified sectors should be discarded if the * underlying block device supports trim (ATA) or unmap (SCSI) operations, * or a BLKIF_RSP_EOPNOTSUPP should be returned. * More information about trim/unmap operations at: * http://t13.org/Documents/UploadedDocuments/docs2008/ * e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc * http://www.seagate.com/staticfiles/support/disc/manuals/ * Interface%20manuals/100293068c.pdf * The backend can optionally provide these extra XenBus attributes to * further optimize the discard functionality: * 'discard-aligment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's * natural alignment. Do not confuse this with natural disk alignment offset. * 'discard-granularity' - Devices that support discard functionality may * internally allocate space using units that are bigger than the logical block * size. The discard-granularity parameter indicates the size of the internal * allocation unit in bytes if reported by the device. Otherwise the * discard-granularity will be set to match the device's physical block size. * It is the minimum size you can discard. * 'discard-secure' - All copies of the discarded sectors (potentially created * by garbage collection) must also be erased. To use this feature, the flag * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard. */ #define BLKIF_OP_DISCARD 5 /* * Recognized if "feature-max-indirect-segments" in present in the backend * xenbus info. 
The "feature-max-indirect-segments" node contains the maximum * number of segments allowed by the backend per request. If the node is * present, the frontend might use blkif_request_indirect structs in order to * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The * maximum number of indirect segments is fixed by the backend, but the * frontend can issue requests with any number of indirect segments as long as * it's less than the number provided by the backend. The indirect_grefs field * in blkif_request_indirect should be filled by the frontend with the * grant references of the pages that are holding the indirect segments. * This pages are filled with an array of blkif_request_segment_aligned * that hold the information about the segments. The number of indirect * pages to use is determined by the maximum number of segments * a indirect request contains. Every indirect page can contain a maximum * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), * so to calculate the number of indirect pages to use we have to do * ceil(indirect_segments/512). * * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* * create the "feature-max-indirect-segments" node! */ #define BLKIF_OP_INDIRECT 6 /* * Maximum scatter/gather segments per request. * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. * NB. This could be 12 if the ring indexes weren't stored in the same page. */ #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 #if 0 struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; #endif struct blkif_request_segment_aligned { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ } __attribute__((__packed__)); struct blkif_request_rw { uint8_t nr_segments; /* number of segments */ blkif_vdev_t handle; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ struct blkif_request_segment { grant_ref_t gref; /* reference to I/O buffer frame */ /* @first_sect: first sector in frame to transfer (inclusive). */ /* @last_sect: last sector in frame to transfer (inclusive). */ uint8_t first_sect, last_sect; } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; } __attribute__((__packed__)); struct blkif_request_discard { uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. 
*/ #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ blkif_vdev_t _pad1; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ blkif_sector_t sector_number; uint64_t nr_sectors; uint8_t _pad3; } __attribute__((__packed__)); struct blkif_request_other { uint8_t _pad1; blkif_vdev_t _pad2; /* only for read/write requests */ #ifndef CONFIG_X86_32 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ #endif uint64_t id; /* private guest value, echoed in resp */ } __attribute__((__packed__)); struct blkif_request_indirect { uint8_t indirect_op; uint16_t nr_segments; #ifndef CONFIG_X86_32 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ #endif uint64_t id; blkif_sector_t sector_number; blkif_vdev_t handle; uint16_t _pad2; grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; #ifndef CONFIG_X86_32 uint32_t _pad3; /* make it 64 byte aligned */ #else uint64_t _pad3; /* make it 64 byte aligned */ #endif } __attribute__((__packed__)); struct blkif_request { uint8_t operation; /* BLKIF_OP_??? */ union { struct blkif_request_rw rw; struct blkif_request_discard discard; struct blkif_request_other other; struct blkif_request_indirect indirect; } u; } __attribute__((__packed__)); typedef struct blkif_request blkif_request_t; struct blkif_response { uint64_t id; /* copied from request */ uint8_t operation; /* copied from request */ int16_t status; /* BLKIF_RSP_??? */ }; typedef struct blkif_response blkif_response_t; /* * STATUS RETURN CODES. */ /* Operation not supported (only happens on barrier writes). */ #define BLKIF_RSP_EOPNOTSUPP -2 /* Operation failed for some unspecified reason (-EIO). */ #define BLKIF_RSP_ERROR -1 /* Operation completed successfully. */ #define BLKIF_RSP_OKAY 0 /* * Generate blkif ring structures and types. */ DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response); #define VDISK_CDROM 0x1 #define VDISK_REMOVABLE 0x2 #define VDISK_READONLY 0x4 /* Xen-defined major numbers for virtual disks, they look strangely * familiar */ #define XEN_IDE0_MAJOR 3 #define XEN_IDE1_MAJOR 22 #define XEN_SCSI_DISK0_MAJOR 8 #define XEN_SCSI_DISK1_MAJOR 65 #define XEN_SCSI_DISK2_MAJOR 66 #define XEN_SCSI_DISK3_MAJOR 67 #define XEN_SCSI_DISK4_MAJOR 68 #define XEN_SCSI_DISK5_MAJOR 69 #define XEN_SCSI_DISK6_MAJOR 70 #define XEN_SCSI_DISK7_MAJOR 71 #define XEN_SCSI_DISK8_MAJOR 128 #define XEN_SCSI_DISK9_MAJOR 129 #define XEN_SCSI_DISK10_MAJOR 130 #define XEN_SCSI_DISK11_MAJOR 131 #define XEN_SCSI_DISK12_MAJOR 132 #define XEN_SCSI_DISK13_MAJOR 133 #define XEN_SCSI_DISK14_MAJOR 134 #define XEN_SCSI_DISK15_MAJOR 135 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/console.h000066400000000000000000000010441314037446600321360ustar00rootroot00000000000000/****************************************************************************** * console.h * * Console I/O interface for Xen guest OSes. 
* * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_CONSOLE_H__ #define __XEN_PUBLIC_IO_CONSOLE_H__ typedef uint32_t XENCONS_RING_IDX; #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1)) struct xencons_interface { char in[1024]; char out[2048]; XENCONS_RING_IDX in_cons, in_prod; XENCONS_RING_IDX out_cons, out_prod; }; #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/fbif.h000066400000000000000000000107521314037446600314100ustar00rootroot00000000000000/* * fbif.h -- Xen virtual frame buffer device * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_FBIF_H__ #define __XEN_PUBLIC_IO_FBIF_H__ /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. */ /* Event type 1 currently not used */ /* * Framebuffer update notification event * Capable frontend sets feature-update in xenstore. * Backend requests it by setting request-update in xenstore. */ #define XENFB_TYPE_UPDATE 2 struct xenfb_update { uint8_t type; /* XENFB_TYPE_UPDATE */ int32_t x; /* source x */ int32_t y; /* source y */ int32_t width; /* rect width */ int32_t height; /* rect height */ }; /* * Framebuffer resize notification event * Capable backend sets feature-resize in xenstore. */ #define XENFB_TYPE_RESIZE 3 struct xenfb_resize { uint8_t type; /* XENFB_TYPE_RESIZE */ int32_t width; /* width in pixels */ int32_t height; /* height in pixels */ int32_t stride; /* stride in bytes */ int32_t depth; /* depth in bits */ int32_t offset; /* start offset within framebuffer */ }; #define XENFB_OUT_EVENT_SIZE 40 union xenfb_out_event { uint8_t type; struct xenfb_update update; struct xenfb_resize resize; char pad[XENFB_OUT_EVENT_SIZE]; }; /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. * No in events currently defined. 
*/ #define XENFB_IN_EVENT_SIZE 40 union xenfb_in_event { uint8_t type; char pad[XENFB_IN_EVENT_SIZE]; }; /* shared page */ #define XENFB_IN_RING_SIZE 1024 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE) #define XENFB_IN_RING_OFFS 1024 #define XENFB_IN_RING(page) \ ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS)) #define XENFB_IN_RING_REF(page, idx) \ (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN]) #define XENFB_OUT_RING_SIZE 2048 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE) #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE) #define XENFB_OUT_RING(page) \ ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS)) #define XENFB_OUT_RING_REF(page, idx) \ (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN]) struct xenfb_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; int32_t width; /* width of the framebuffer (in pixels) */ int32_t height; /* height of the framebuffer (in pixels) */ uint32_t line_length; /* length of a row of pixels (in bytes) */ uint32_t mem_length; /* length of the framebuffer (in bytes) */ uint8_t depth; /* depth of a pixel (in bits) */ /* * Framebuffer page directory * * Each directory page holds PAGE_SIZE / sizeof(*pd) * framebuffer pages, and can thus map up to PAGE_SIZE * * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 * Megs 64 bit. 256 directories give enough room for a 512 * Meg framebuffer with a max resolution of 12,800x10,240. * Should be enough for a while with room leftover for * expansion. */ unsigned long pd[256]; }; /* * Wart: xenkbd needs to know default resolution. Put it here until a * better solution is found, but don't leak it to the backend. */ #ifdef __KERNEL__ #define XENFB_WIDTH 800 #define XENFB_HEIGHT 600 #define XENFB_DEPTH 32 #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/kbdif.h000066400000000000000000000072171314037446600315630ustar00rootroot00000000000000/* * kbdif.h -- Xen virtual keyboard/mouse * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) 2005 Anthony Liguori * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster */ #ifndef __XEN_PUBLIC_IO_KBDIF_H__ #define __XEN_PUBLIC_IO_KBDIF_H__ /* In events (backend -> frontend) */ /* * Frontends should ignore unknown in events. 
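 *
 * A sketch of the consumer loop this implies, using the shared page and the
 * XENKBD_IN_RING_REF() accessor defined below ("page" points at the mapped
 * struct xenkbd_page, "handle_key" and "handle_motion" are placeholders;
 * the barriers mirror the other ring protocols in this directory):
 *
 *   uint32_t cons = page->in_cons, prod = page->in_prod;
 *   rmb();
 *   for ( ; cons != prod; cons++) {
 *       union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, cons);
 *       switch (ev->type) {
 *       case XENKBD_TYPE_KEY:    handle_key(&ev->key);       break;
 *       case XENKBD_TYPE_MOTION: handle_motion(&ev->motion); break;
 *       default:                 break;
 *       }
 *   }
 *   mb();
 *   page->in_cons = cons;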
*/ /* Pointer movement event */ #define XENKBD_TYPE_MOTION 1 /* Event type 2 currently not used */ /* Key event (includes pointer buttons) */ #define XENKBD_TYPE_KEY 3 /* * Pointer position event * Capable backend sets feature-abs-pointer in xenstore. * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting * request-abs-update in xenstore. */ #define XENKBD_TYPE_POS 4 struct xenkbd_motion { uint8_t type; /* XENKBD_TYPE_MOTION */ int32_t rel_x; /* relative X motion */ int32_t rel_y; /* relative Y motion */ int32_t rel_z; /* relative Z motion (wheel) */ }; struct xenkbd_key { uint8_t type; /* XENKBD_TYPE_KEY */ uint8_t pressed; /* 1 if pressed; 0 otherwise */ uint32_t keycode; /* KEY_* from linux/input.h */ }; struct xenkbd_position { uint8_t type; /* XENKBD_TYPE_POS */ int32_t abs_x; /* absolute X position (in FB pixels) */ int32_t abs_y; /* absolute Y position (in FB pixels) */ int32_t rel_z; /* relative Z motion (wheel) */ }; #define XENKBD_IN_EVENT_SIZE 40 union xenkbd_in_event { uint8_t type; struct xenkbd_motion motion; struct xenkbd_key key; struct xenkbd_position pos; char pad[XENKBD_IN_EVENT_SIZE]; }; /* Out events (frontend -> backend) */ /* * Out events may be sent only when requested by backend, and receipt * of an unknown out event is an error. * No out events currently defined. */ #define XENKBD_OUT_EVENT_SIZE 40 union xenkbd_out_event { uint8_t type; char pad[XENKBD_OUT_EVENT_SIZE]; }; /* shared page */ #define XENKBD_IN_RING_SIZE 2048 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE) #define XENKBD_IN_RING_OFFS 1024 #define XENKBD_IN_RING(page) \ ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS)) #define XENKBD_IN_RING_REF(page, idx) \ (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN]) #define XENKBD_OUT_RING_SIZE 1024 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE) #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE) #define XENKBD_OUT_RING(page) \ ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS)) #define XENKBD_OUT_RING_REF(page, idx) \ (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN]) struct xenkbd_page { uint32_t in_cons, in_prod; uint32_t out_cons, out_prod; }; #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/netif.h000066400000000000000000000114761314037446600316130ustar00rootroot00000000000000/****************************************************************************** * netif.h * * Unified network-device I/O interface for Xen guest OSes. * * Copyright (c) 2003-2004, Keir Fraser */ #ifndef __XEN_PUBLIC_IO_NETIF_H__ #define __XEN_PUBLIC_IO_NETIF_H__ #include "ring.h" #include "../grant_table.h" /* * Notifications after enqueuing any type of message should be conditional on * the appropriate req_event or rsp_event field in the shared ring. * If the client sends notification for rx requests then it should specify * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume * that it cannot safely queue packets (as it may not be kicked to send them). */ /* * This is the 'wire' format for packets: * Request 1: netif_tx_request -- NETTXF_* (any flags) * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * Request 4: netif_tx_request -- NETTXF_more_data * Request 5: netif_tx_request -- NETTXF_more_data * ... 
* Request N: netif_tx_request -- 0 */ /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETTXF_csum_blank (0) #define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) /* Packet data has been validated against protocol checksum. */ #define _NETTXF_data_validated (1) #define NETTXF_data_validated (1U<<_NETTXF_data_validated) /* Packet continues in the next request descriptor. */ #define _NETTXF_more_data (2) #define NETTXF_more_data (1U<<_NETTXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETTXF_extra_info (3) #define NETTXF_extra_info (1U<<_NETTXF_extra_info) struct xen_netif_tx_request { grant_ref_t gref; /* Reference to buffer page */ uint16_t offset; /* Offset within buffer page */ uint16_t flags; /* NETTXF_* */ uint16_t id; /* Echoed in response message. */ uint16_t size; /* Packet size in bytes. */ }; /* Types of netif_extra_info descriptors. */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_MAX (2) /* netif_extra_info flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) /* GSO types - only TCPv4 currently supported. */ #define XEN_NETIF_GSO_TYPE_TCPV4 (1) /* * This structure needs to fit within both netif_tx_request and * netif_rx_response for compatibility. */ struct xen_netif_extra_info { uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ union { struct { /* * Maximum payload size of each segment. For * example, for TCP this is just the path MSS. */ uint16_t size; /* * GSO type. This determines the protocol of * the packet and any extra features required * to segment the packet properly. */ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ /* Future expansion. */ uint8_t pad; /* * GSO features. This specifies any extra GSO * features required to process this packet, * such as ECN support for TCPv4. */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ } gso; uint16_t pad[3]; } u; }; struct xen_netif_tx_response { uint16_t id; int16_t status; /* NETIF_RSP_* */ }; struct xen_netif_rx_request { uint16_t id; /* Echoed in response message. */ grant_ref_t gref; /* Reference to incoming granted frame */ }; /* Packet data has been validated against protocol checksum. */ #define _NETRXF_data_validated (0) #define NETRXF_data_validated (1U<<_NETRXF_data_validated) /* Protocol checksum field is blank in the packet (hardware offload)? */ #define _NETRXF_csum_blank (1) #define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) /* Packet continues in the next request descriptor. */ #define _NETRXF_more_data (2) #define NETRXF_more_data (1U<<_NETRXF_more_data) /* Packet to be followed by extra descriptor(s). */ #define _NETRXF_extra_info (3) #define NETRXF_extra_info (1U<<_NETRXF_extra_info) struct xen_netif_rx_response { uint16_t id; uint16_t offset; /* Offset in page of start of received packet */ uint16_t flags; /* NETRXF_* */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ }; /* * Generate netif ring structures and types. */ DEFINE_RING_TYPES(xen_netif_tx, struct xen_netif_tx_request, struct xen_netif_tx_response); DEFINE_RING_TYPES(xen_netif_rx, struct xen_netif_rx_request, struct xen_netif_rx_response); #define NETIF_RSP_DROPPED -2 #define NETIF_RSP_ERROR -1 #define NETIF_RSP_OKAY 0 /* No response: used for auxiliary requests (e.g., netif_tx_extra). 
*/ #define NETIF_RSP_NULL 1 #endif protocols.h000066400000000000000000000011751314037446600324460ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io#ifndef __XEN_PROTOCOLS_H__ #define __XEN_PROTOCOLS_H__ #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi" #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi" #define XEN_IO_PROTO_ABI_IA64 "ia64-abi" #define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi" #if defined(__i386__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32 #elif defined(__x86_64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64 #elif defined(__ia64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64 #elif defined(__powerpc64__) # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64 #else # error arch fixup needed here #endif #endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/ring.h000066400000000000000000000227551314037446600314470ustar00rootroot00000000000000/****************************************************************************** * ring.h * * Shared producer-consumer ring macros. * * Tim Deegan and Andrew Warfield November 2004. */ #ifndef __XEN_PUBLIC_IO_RING_H__ #define __XEN_PUBLIC_IO_RING_H__ typedef unsigned int RING_IDX; /* Round a 32-bit unsigned constant down to the nearest power of two. */ #define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) #define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) /* * Calculate size of a shared ring, given the total available space for the * ring and indexes (_sz), and the name tag of the request/response structure. * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ #define __CONST_RING_SIZE(_s, _sz) \ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ sizeof(((struct _s##_sring *)0)->ring[0]))) #define __RING_SIZE(_s, _sz) \ (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, * let's say struct request, and struct response already defined. * * In a header where you want the ring datatype declared, you then do: * * DEFINE_RING_TYPES(mytag, struct request, struct response); * * These expand out to give you a set of types, as you can see below. * The most important of these are: * * struct mytag_sring - The shared ring. * struct mytag_front_ring - The 'front' half of the ring. * struct mytag_back_ring - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). 
To initialise * the front half: * * struct mytag_front_ring front_ring; * SHARED_RING_INIT((struct mytag_sring *)shared_page); * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, * PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * * struct mytag_back_ring back_ring; * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, * PAGE_SIZE); */ #define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ \ /* Shared ring entry */ \ union __name##_sring_entry { \ __req_t req; \ __rsp_t rsp; \ }; \ \ /* Shared ring page */ \ struct __name##_sring { \ RING_IDX req_prod, req_event; \ RING_IDX rsp_prod, rsp_event; \ uint8_t pad[48]; \ union __name##_sring_entry ring[1]; /* variable-length */ \ }; \ \ /* "Front" end's private variables */ \ struct __name##_front_ring { \ RING_IDX req_prod_pvt; \ RING_IDX rsp_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; \ \ /* "Back" end's private variables */ \ struct __name##_back_ring { \ RING_IDX rsp_prod_pvt; \ RING_IDX req_cons; \ unsigned int nr_ents; \ struct __name##_sring *sring; \ }; /* * Macros for manipulating rings. * * FRONT_RING_whatever works on the "front end" of a ring: here * requests are pushed on to the ring and responses taken off it. * * BACK_RING_whatever works on the "back end" of a ring: here * requests are taken off the ring and responses put on. * * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. * This is OK in 1-for-1 request-response situations where the * requestor (front end) never has more than RING_SIZE()-1 * outstanding requests. */ /* Initialising empty rings */ #define SHARED_RING_INIT(_s) do { \ (_s)->req_prod = (_s)->rsp_prod = 0; \ (_s)->req_event = (_s)->rsp_event = 1; \ memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) #define FRONT_RING_INIT(_r, _s, __size) do { \ (_r)->req_prod_pvt = 0; \ (_r)->rsp_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) #define BACK_RING_INIT(_r, _s, __size) do { \ (_r)->rsp_prod_pvt = 0; \ (_r)->req_cons = 0; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ (_r)->sring = (_s); \ } while (0) /* Initialize to existing shared indexes -- for recovery */ #define FRONT_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->req_prod_pvt = (_s)->req_prod; \ (_r)->rsp_cons = (_s)->rsp_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) #define BACK_RING_ATTACH(_r, _s, __size) do { \ (_r)->sring = (_s); \ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ (_r)->req_cons = (_s)->req_prod; \ (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) /* How big is this ring? */ #define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ #define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ #define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ ({ \ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ unsigned int rsp = RING_SIZE(_r) - \ ((_r)->req_cons - (_r)->rsp_prod_pvt); \ req < rsp ? req : rsp; \ }) /* Direct access to individual ring elements, by index. 
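 *
 * For instance, a back end typically drains unconsumed requests with the
 * accessors defined below (a sketch only, reusing the "mytag"/"struct
 * request" names from the DEFINE_RING_TYPES example above;
 * "process_request" is a placeholder):
 *
 *   RING_IDX rc = back_ring.req_cons, rp = back_ring.sring->req_prod;
 *   rmb();
 *   while (rc != rp && !RING_REQUEST_CONS_OVERFLOW(&back_ring, rc)) {
 *       struct request *req = RING_GET_REQUEST(&back_ring, rc);
 *       process_request(req);
 *       back_ring.req_cons = ++rc;
 *   }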
*/ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) #define RING_GET_RESPONSE(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) #define RING_PUSH_REQUESTS(_r) do { \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* * Notification hold-off (req_event and rsp_event): * * When queueing requests or responses on a shared ring, it may not always be * necessary to notify the remote end. For example, if requests are in flight * in a backend, the front may be able to queue further requests without * notifying the back (if the back checks for new requests when it queues * responses). * * When enqueuing requests or responses: * * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument * is a boolean return value. True indicates that the receiver requires an * asynchronous notification. * * After dequeuing requests or responses (before sleeping the connection): * * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). * The second argument is a boolean return value. True indicates that there * are pending messages on the ring (i.e., the connection should not be put * to sleep). * * These macros will set the req_event/rsp_event field to trigger a * notification on the very next message that is enqueued. If you want to * create batches of work (i.e., only receive a notification after several * messages have been enqueued) then you will need to create a customised * version of the FINAL_CHECK macro in your own code, which sets the event * field appropriately. 
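 *
 * Putting the above together, a sketch of the front end's enqueue path
 * (continuing the "mytag" example; "fill_in", "handle_response" and the
 * event-channel kick "notify_remote" are placeholders):
 *
 *   int notify;
 *   struct request *req =
 *       RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *   fill_in(req);
 *   front_ring.req_prod_pvt++;
 *   RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *   if (notify)
 *       notify_remote();
 *
 * and of the response-draining idiom used before going back to sleep:
 *
 *   RING_IDX rp;
 *   int more_to_do;
 *   do {
 *       rp = front_ring.sring->rsp_prod;
 *       rmb();
 *       while (front_ring.rsp_cons != rp) {
 *           struct response *rsp =
 *               RING_GET_RESPONSE(&front_ring, front_ring.rsp_cons);
 *           handle_response(rsp);
 *           front_ring.rsp_cons++;
 *       }
 *       RING_FINAL_CHECK_FOR_RESPONSES(&front_ring, more_to_do);
 *   } while (more_to_do);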
*/ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ mb(); /* front sees new responses /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) #define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) #define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/vscsiif.h000066400000000000000000000070421314037446600321460ustar00rootroot00000000000000/****************************************************************************** * vscsiif.h * * Based on the blkif.h code. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright(c) FUJITSU Limited 2008. */ #ifndef __XEN__PUBLIC_IO_SCSI_H__ #define __XEN__PUBLIC_IO_SCSI_H__ #include "ring.h" #include "../grant_table.h" /* command between backend and frontend */ #define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */ #define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device(Lun) Abort*/ #define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device(Lun) Reset*/ #define VSCSIIF_BACK_MAX_PENDING_REQS 128 /* * Maximum scatter/gather segments per request. * * Considering balance between allocating al least 16 "vscsiif_request" * structures on one page (4096bytes) and number of scatter gather * needed, we decided to use 26 as a magic number. 
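 *
 * (Rough arithmetic behind that choice, assuming the usual field sizes and
 * alignment: each seg[] entry below is 8 bytes, so one request is about
 * 32 + 26*8 + 12 = 252 bytes, and 4096/252 is a little over 16, i.e. at
 * least 16 requests still fit in one 4096-byte shared page.)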
*/ #define VSCSIIF_SG_TABLESIZE 26 /* * base on linux kernel 2.6.18 */ #define VSCSIIF_MAX_COMMAND_SIZE 16 #define VSCSIIF_SENSE_BUFFERSIZE 96 struct vscsiif_request { uint16_t rqid; /* private guest value, echoed in resp */ uint8_t act; /* command between backend and frontend */ uint8_t cmd_len; uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; uint16_t timeout_per_command; /* The command is issued by twice the value in Backend. */ uint16_t channel, id, lun; uint16_t padding; uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1) DMA_FROM_DEVICE(2) DMA_NONE(3) requests */ uint8_t nr_segments; /* Number of pieces of scatter-gather */ struct scsiif_request_segment { grant_ref_t gref; uint16_t offset; uint16_t length; } seg[VSCSIIF_SG_TABLESIZE]; uint32_t reserved[3]; }; typedef struct vscsiif_request vscsiif_request_t; struct vscsiif_response { uint16_t rqid; uint8_t padding; uint8_t sense_len; uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE]; int32_t rslt; uint32_t residual_len; /* request bufflen - return the value from physical device */ uint32_t reserved[36]; }; typedef struct vscsiif_response vscsiif_response_t; DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/xenbus.h000066400000000000000000000024271314037446600320060ustar00rootroot00000000000000/***************************************************************************** * xenbus.h * * Xenbus protocol details. * * Copyright (C) 2005 XenSource Ltd. */ #ifndef _XEN_PUBLIC_IO_XENBUS_H #define _XEN_PUBLIC_IO_XENBUS_H /* The state of either end of the Xenbus, i.e. the current communication status of initialisation across the bus. States here imply nothing about the state of the connection between the driver and the kernel's device layers. */ enum xenbus_state { XenbusStateUnknown = 0, XenbusStateInitialising = 1, XenbusStateInitWait = 2, /* Finished early initialisation, but waiting for information from the peer or hotplug scripts. */ XenbusStateInitialised = 3, /* Initialised and waiting for a connection from the peer. */ XenbusStateConnected = 4, XenbusStateClosing = 5, /* The device is being closed due to an error or an unplug event. */ XenbusStateClosed = 6, /* * Reconfiguring: The device is being reconfigured. */ XenbusStateReconfiguring = 7, XenbusStateReconfigured = 8 }; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ /* * Local variables: * c-file-style: "linux" * indent-tabs-mode: t * c-indent-level: 8 * c-basic-offset: 8 * tab-width: 8 * End: */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/io/xs_wire.h000066400000000000000000000040041314037446600321530ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. 
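 *
 * (For example, a failed XS_READ is normally answered with a message of
 * type XS_ERROR whose payload is the nul-terminated errno name, such as
 * "ENOENT", rather than a numeric code.)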
*/ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } static struct xsd_errors xsd_errors[] __attribute__((unused)) = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; #endif /* _XS_WIRE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/memory.h000066400000000000000000000127061314037446600314040ustar00rootroot00000000000000/****************************************************************************** * memory.h * * Memory reservation and information. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_MEMORY_H__ #define __XEN_PUBLIC_MEMORY_H__ #include /* * Increase or decrease the specified domain's memory reservation. Returns a * -ve errcode on failure, or the # extents successfully allocated or freed. * arg == addr of struct xen_memory_reservation. */ #define XENMEM_increase_reservation 0 #define XENMEM_decrease_reservation 1 #define XENMEM_populate_physmap 6 struct xen_memory_reservation { /* * XENMEM_increase_reservation: * OUT: MFN (*not* GMFN) bases of extents that were allocated * XENMEM_decrease_reservation: * IN: GMFN bases of extents to free * XENMEM_populate_physmap: * IN: GPFN bases of extents to populate with memory * OUT: GMFN bases of extents that were allocated * (NB. This command also updates the mach_to_phys translation table) */ GUEST_HANDLE(ulong) extent_start; /* Number of extents, and size/alignment of each (2^extent_order pages). */ unsigned long nr_extents; unsigned int extent_order; /* * Maximum # bits addressable by the user of the allocated region (e.g., * I/O devices often have a 32-bit limitation even in 64-bit systems). If * zero then the user has no addressing restriction. * This field is not used by XENMEM_decrease_reservation. */ unsigned int address_bits; /* * Domain whose reservation is being changed. * Unprivileged domains can specify only DOMID_SELF. */ domid_t domid; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation); /* * Returns the maximum machine frame number of mapped RAM in this system. * This command always succeeds (it never returns an error code). * arg == NULL. */ #define XENMEM_maximum_ram_page 2 /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. * arg == addr of domid_t. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys * mapping table. 
Architectures which do not have a m2p table do not implement * this command. * arg == addr of xen_machphys_mfn_list_t. */ #define XENMEM_machphys_mfn_list 5 struct xen_machphys_mfn_list { /* * Size of the 'extent_start' array. Fewer entries will be filled if the * machphys table is smaller than max_extents * 2MB. */ unsigned int max_extents; /* * Pointer to buffer to fill with list of extent starts. If there are * any large discontiguities in the machine address space, 2MB gaps in * the machphys table will be represented by an MFN base of zero. */ GUEST_HANDLE(ulong) extent_start; /* * Number of extents written to the above array. This will be smaller * than 'max_extents' if the machphys table is smaller than max_e * 2MB. */ unsigned int nr_extents; }; DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); /* * Sets the GPFN at which a particular page appears in the specified guest's * pseudophysical address space. * arg == addr of xen_add_to_physmap_t. */ #define XENMEM_add_to_physmap 7 struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; /* Source mapping space. */ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ unsigned int space; /* Index into source mapping space. */ unsigned long idx; /* GPFN where the source mapping page should appear. */ unsigned long gpfn; }; DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); /* * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error * code on failure. This call only works for auto-translated guests. */ #define XENMEM_translate_gpfn_list 8 struct xen_translate_gpfn_list { /* Which domain to translate for? */ domid_t domid; /* Length of list. */ unsigned long nr_gpfns; /* List of GPFNs to translate. */ GUEST_HANDLE(ulong) gpfn_list; /* * Output list to contain MFN translations. May be the same as the input * list (in which case each input GPFN is overwritten with the output MFN). */ GUEST_HANDLE(ulong) mfn_list; }; DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list); /* * Returns the pseudo-physical memory map as it was when the domain * was started (specified by XENMEM_set_memory_map). * arg == addr of struct xen_memory_map. */ #define XENMEM_memory_map 9 struct xen_memory_map { /* * On call the number of entries which can be stored in buffer. On * return the number of entries which have been stored in * buffer. */ unsigned int nr_entries; /* * Entries in the buffer are in the same format as returned by the * BIOS INT 0x15 EAX=0xE820 call. */ GUEST_HANDLE(void) buffer; }; DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); /* * Returns the real physical memory map. Passes the same structure as * XENMEM_memory_map. * arg == addr of struct xen_memory_map. */ #define XENMEM_machine_memory_map 10 /* * Prevent the balloon driver from changing the memory reservation * during a driver critical region. 
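 *
 * Illustrative sketch (not part of the original header): code that needs a
 * stable machine-contiguous region takes this lock so the balloon driver
 * cannot hand frames back to the hypervisor underneath it, e.g.
 *
 *   spin_lock_irqsave(&xen_reservation_lock, flags);
 *   ... exchange or remap the frames ...
 *   spin_unlock_irqrestore(&xen_reservation_lock, flags);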
*/ extern spinlock_t xen_reservation_lock; #endif /* __XEN_PUBLIC_MEMORY_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/physdev.h000066400000000000000000000103671314037446600315570ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #ifndef __XEN_PUBLIC_PHYSDEV_H__ #define __XEN_PUBLIC_PHYSDEV_H__ /* * Prototype for this hypercall is: * int physdev_op(int cmd, void *args) * @cmd == PHYSDEVOP_??? (physdev operation). * @args == Operation-specific extra arguments (NULL if none). */ /* * Notify end-of-interrupt (EOI) for the specified IRQ. * @arg == pointer to physdev_eoi structure. */ #define PHYSDEVOP_eoi 12 struct physdev_eoi { /* IN */ uint32_t irq; }; /* * Query the status of an IRQ line. * @arg == pointer to physdev_irq_status_query structure. */ #define PHYSDEVOP_irq_status_query 5 struct physdev_irq_status_query { /* IN */ uint32_t irq; /* OUT */ uint32_t flags; /* XENIRQSTAT_* */ }; /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */ #define _XENIRQSTAT_needs_eoi (0) #define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi) /* IRQ shared by multiple guests? */ #define _XENIRQSTAT_shared (1) #define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared) /* * Set the current VCPU's I/O privilege level. * @arg == pointer to physdev_set_iopl structure. */ #define PHYSDEVOP_set_iopl 6 struct physdev_set_iopl { /* IN */ uint32_t iopl; }; /* * Set the current VCPU's I/O-port permissions bitmap. * @arg == pointer to physdev_set_iobitmap structure. */ #define PHYSDEVOP_set_iobitmap 7 struct physdev_set_iobitmap { /* IN */ uint8_t * bitmap; uint32_t nr_ports; }; /* * Read or write an IO-APIC register. * @arg == pointer to physdev_apic structure. */ #define PHYSDEVOP_apic_read 8 #define PHYSDEVOP_apic_write 9 struct physdev_apic { /* IN */ unsigned long apic_physbase; uint32_t reg; /* IN or OUT */ uint32_t value; }; /* * Allocate or free a physical upcall vector for the specified IRQ line. * @arg == pointer to physdev_irq structure. */ #define PHYSDEVOP_alloc_irq_vector 10 #define PHYSDEVOP_free_irq_vector 11 struct physdev_irq { /* IN */ uint32_t irq; /* IN or OUT */ uint32_t vector; }; /* * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op() * hypercall since 0x00030202. 
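 *
 * Illustrative sketch (not part of the original header, and assuming the
 * guest's usual HYPERVISOR_physdev_op() wrapper): the current interface
 * instead takes a command plus a per-command argument structure, e.g. to
 * signal end-of-interrupt for an IRQ:
 *
 *   struct physdev_eoi eoi = { .irq = irq };
 *   rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);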
*/ struct physdev_op { uint32_t cmd; union { struct physdev_irq_status_query irq_status_query; struct physdev_set_iopl set_iopl; struct physdev_set_iobitmap set_iobitmap; struct physdev_apic apic_op; struct physdev_irq irq_op; } u; }; /* * Notify that some PIRQ-bound event channels have been unmasked. * ** This command is obsolete since interface version 0x00030202 and is ** * ** unsupported by newer versions of Xen. ** */ #define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4 /* * These all-capitals physdev operation names are superceded by the new names * (defined above) since interface version 0x00030202. */ #define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query #define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl #define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap #define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read #define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write #define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector #define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi #define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared #endif /* __XEN_PUBLIC_PHYSDEV_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/sched.h000066400000000000000000000046121314037446600311570ustar00rootroot00000000000000/****************************************************************************** * sched.h * * Scheduler state interactions * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_SCHED_H__ #define __XEN_PUBLIC_SCHED_H__ #include "event_channel.h" /* * The prototype for this hypercall is: * long sched_op_new(int cmd, void *arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == Operation-specific extra argument(s), as described below. * * **NOTE**: * Versions of Xen prior to 3.0.2 provide only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) */ /* * Voluntarily yield the CPU. * @arg == NULL. */ #define SCHEDOP_yield 0 /* * Block execution of this VCPU until an event is received for processing. * If called with event upcalls masked, this operation will atomically * reenable event delivery and check for pending events before blocking the * VCPU. This avoids a "wakeup waiting" race. * @arg == NULL. */ #define SCHEDOP_block 1 /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. */ #define SCHEDOP_shutdown 2 struct sched_shutdown { unsigned int reason; /* SHUTDOWN_* */ }; DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); /* * Poll a set of event-channel ports. Return when one or more are pending. An * optional timeout may be specified. * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 struct sched_poll { GUEST_HANDLE(evtchn_port_t) ports; unsigned int nr_ports; uint64_t timeout; }; DEFINE_GUEST_HANDLE_STRUCT(sched_poll); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control * software to determine the appropriate action. For the most part, Xen does * not care about the shutdown code. */ #define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */ #define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */ #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. 
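 *
 * Illustrative sketch (not part of the original header): a guest requests a
 * clean power-off by passing one of the codes above to SCHEDOP_shutdown
 * through its hypercall wrapper, e.g.
 *
 *   struct sched_shutdown arg = { .reason = SHUTDOWN_poweroff };
 *   HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);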
*/ #endif /* __XEN_PUBLIC_SCHED_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/vcpu.h000066400000000000000000000147701314037446600310540ustar00rootroot00000000000000/****************************************************************************** * vcpu.h * * VCPU initialisation, query, and hotplug. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VCPU_H__ #define __XEN_PUBLIC_VCPU_H__ /* * Prototype for this hypercall is: * int vcpu_op(int cmd, int vcpuid, void *extra_args) * @cmd == VCPUOP_??? (VCPU operation). * @vcpuid == VCPU to operate on. * @extra_args == Operation-specific extra arguments (NULL if none). */ /* * Initialise a VCPU. Each VCPU can be initialised only once. A * newly-initialised VCPU will not run until it is brought up by VCPUOP_up. * * @extra_arg == pointer to vcpu_guest_context structure containing initial * state for the VCPU. */ #define VCPUOP_initialise 0 /* * Bring up a VCPU. This makes the VCPU runnable. This operation will fail * if the VCPU has not been initialised (VCPUOP_initialise). */ #define VCPUOP_up 1 /* * Bring down a VCPU (i.e., make it non-runnable). * There are a few caveats that callers should observe: * 1. This operation may return, and VCPU_is_up may return false, before the * VCPU stops running (i.e., the command is asynchronous). It is a good * idea to ensure that the VCPU has entered a non-critical loop before * bringing it down. Alternatively, this operation is guaranteed * synchronous if invoked by the VCPU itself. * 2. After a VCPU is initialised, there is currently no way to drop all its * references to domain memory. Even a VCPU that is down still holds * memory references via its pagetable base pointer and GDT. It is good * practise to move a VCPU onto an 'idle' or default page table, LDT and * GDT before bringing it down. */ #define VCPUOP_down 2 /* Returns 1 if the given VCPU is up. */ #define VCPUOP_is_up 3 /* * Return information about the state and running time of a VCPU. * @extra_arg == pointer to vcpu_runstate_info structure. */ #define VCPUOP_get_runstate_info 4 struct vcpu_runstate_info { /* VCPU's current state (RUNSTATE_*). */ int state; /* When was current state entered (system time, ns)? */ uint64_t state_entry_time; /* * Time spent in each RUNSTATE_* (ns). The sum of these times is * guaranteed not to drift from system time. 
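 *
 * Illustrative sketch (not part of the original header): the array is
 * indexed by the RUNSTATE_* values defined below, so a guest can estimate
 * "stolen" time for a VCPU roughly as
 *
 *   struct vcpu_runstate_info info;
 *   HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, cpu, &info);
 *   stolen = info.time[RUNSTATE_runnable] + info.time[RUNSTATE_offline];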
*/ uint64_t time[4]; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info); /* VCPU is currently running on a physical CPU. */ #define RUNSTATE_running 0 /* VCPU is runnable, but not currently scheduled on any physical CPU. */ #define RUNSTATE_runnable 1 /* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */ #define RUNSTATE_blocked 2 /* * VCPU is not runnable, but it is not blocked. * This is a 'catch all' state for things like hotplug and pauses by the * system administrator (or for critical sections in the hypervisor). * RUNSTATE_blocked dominates this state (it is the preferred state). */ #define RUNSTATE_offline 3 /* * Register a shared memory area from which the guest may obtain its own * runstate information without needing to execute a hypercall. * Notes: * 1. The registered address may be virtual or physical, depending on the * platform. The virtual address should be registered on x86 systems. * 2. Only one shared area may be registered per VCPU. The shared area is * updated by the hypervisor each time the VCPU is scheduled. Thus * runstate.state will always be RUNSTATE_running and * runstate.state_entry_time will indicate the system time at which the * VCPU was last scheduled to run. * @extra_arg == pointer to vcpu_register_runstate_memory_area structure. */ #define VCPUOP_register_runstate_memory_area 5 struct vcpu_register_runstate_memory_area { union { GUEST_HANDLE(vcpu_runstate_info) h; struct vcpu_runstate_info *v; uint64_t p; } addr; }; /* * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer * which can be set via these commands. Periods smaller than one millisecond * may not be supported. */ #define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */ #define VCPUOP_stop_periodic_timer 7 /* arg == NULL */ struct vcpu_set_periodic_timer { uint64_t period_ns; }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer); /* * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot * timer which can be set via these commands. */ #define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */ #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */ struct vcpu_set_singleshot_timer { uint64_t timeout_abs_ns; uint32_t flags; /* VCPU_SSHOTTMR_??? */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer); /* Flags to VCPUOP_set_singleshot_timer. */ /* Require the timeout to be in the future (return -ETIME if it's passed). */ #define _VCPU_SSHOTTMR_future (0) #define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future) /* * Register a memory location in the guest address space for the * vcpu_info structure. This allows the guest to place the vcpu_info * structure in a convenient place, such as in a per-cpu data area. * The pointer need not be page aligned, but the structure must not * cross a page boundary. */ #define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */ struct vcpu_register_vcpu_info { uint64_t mfn; /* mfn of page to place vcpu_info */ uint32_t offset; /* offset within page */ uint32_t rsvd; /* unused */ }; DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info); #endif /* __XEN_PUBLIC_VCPU_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/version.h000066400000000000000000000031661314037446600315610ustar00rootroot00000000000000/****************************************************************************** * version.h * * Xen version, type, and compile information. 
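 *
 * Illustrative sketch (not part of the original header): the running
 * hypervisor version is normally queried with
 *
 *   version = HYPERVISOR_xen_version(XENVER_version, NULL);
 *   major = version >> 16;
 *   minor = version & 0xffff;
 *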
* * Copyright (c) 2005, Nguyen Anh Quynh * Copyright (c) 2005, Keir Fraser */ #ifndef __XEN_PUBLIC_VERSION_H__ #define __XEN_PUBLIC_VERSION_H__ /* NB. All ops return zero on success, except XENVER_version. */ /* arg == NULL; returns major:minor (16:16). */ #define XENVER_version 0 /* arg == xen_extraversion_t. */ #define XENVER_extraversion 1 struct xen_extraversion { char extraversion[16]; }; #define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion)) /* arg == xen_compile_info_t. */ #define XENVER_compile_info 2 struct xen_compile_info { char compiler[64]; char compile_by[16]; char compile_domain[32]; char compile_date[32]; }; #define XENVER_capabilities 3 struct xen_capabilities_info { char info[1024]; }; #define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info)) #define XENVER_changeset 4 struct xen_changeset_info { char info[64]; }; #define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info)) #define XENVER_platform_parameters 5 struct xen_platform_parameters { unsigned long virt_start; }; #define XENVER_get_features 6 struct xen_feature_info { unsigned int submap_idx; /* IN: which 32-bit submap to return */ uint32_t submap; /* OUT: 32-bit submap */ }; /* Declares the features reported by XENVER_get_features. */ #include "features.h" /* arg == NULL; returns host memory page size. */ #define XENVER_pagesize 7 #endif /* __XEN_PUBLIC_VERSION_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/xen.h000066400000000000000000000442301314037446600306630ustar00rootroot00000000000000/****************************************************************************** * xen.h * * Guest OS interface to Xen. * * Copyright (c) 2004, K A Fraser */ #ifndef __XEN_PUBLIC_XEN_H__ #define __XEN_PUBLIC_XEN_H__ #include #include /* * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). */ /* * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. * EAX = return value * (argument registers may be clobbered on return) * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. * RAX = return value * (argument registers not clobbered on return; RCX, R11 are) */ #define __HYPERVISOR_set_trap_table 0 #define __HYPERVISOR_mmu_update 1 #define __HYPERVISOR_set_gdt 2 #define __HYPERVISOR_stack_switch 3 #define __HYPERVISOR_set_callbacks 4 #define __HYPERVISOR_fpu_taskswitch 5 #define __HYPERVISOR_sched_op 6 #define __HYPERVISOR_dom0_op 7 #define __HYPERVISOR_set_debugreg 8 #define __HYPERVISOR_get_debugreg 9 #define __HYPERVISOR_update_descriptor 10 #define __HYPERVISOR_memory_op 12 #define __HYPERVISOR_multicall 13 #define __HYPERVISOR_update_va_mapping 14 #define __HYPERVISOR_set_timer_op 15 #define __HYPERVISOR_event_channel_op_compat 16 #define __HYPERVISOR_xen_version 17 #define __HYPERVISOR_console_io 18 #define __HYPERVISOR_physdev_op_compat 19 #define __HYPERVISOR_grant_table_op 20 #define __HYPERVISOR_vm_assist 21 #define __HYPERVISOR_update_va_mapping_otherdomain 22 #define __HYPERVISOR_iret 23 /* x86 only */ #define __HYPERVISOR_vcpu_op 24 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ #define __HYPERVISOR_mmuext_op 26 #define __HYPERVISOR_acm_op 27 #define __HYPERVISOR_nmi_op 28 #define __HYPERVISOR_sched_op_new 29 #define __HYPERVISOR_callback_op 30 #define __HYPERVISOR_xenoprof_op 31 #define __HYPERVISOR_event_channel_op 32 #define __HYPERVISOR_physdev_op 33 #define __HYPERVISOR_hvm_op 34 /* Architecture-specific hypercall definitions. 
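 *
 * Illustrative note (not part of the original header): each __HYPERVISOR_*
 * number above is an index into the guest's hypercall page; slot n is a
 * 32-byte stub, so e.g. __HYPERVISOR_xen_version is entered by calling
 * hypercall_page + __HYPERVISOR_xen_version * 32 with arguments in the
 * registers listed at the top of this file.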
*/ #define __HYPERVISOR_arch_0 48 #define __HYPERVISOR_arch_1 49 #define __HYPERVISOR_arch_2 50 #define __HYPERVISOR_arch_3 51 #define __HYPERVISOR_arch_4 52 #define __HYPERVISOR_arch_5 53 #define __HYPERVISOR_arch_6 54 #define __HYPERVISOR_arch_7 55 /* * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. */ #define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */ #define VIRQ_DEBUG 1 /* Request guest to dump debug info. */ #define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ #define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ #define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ /* Architecture-specific VIRQ definitions. */ #define VIRQ_ARCH_0 16 #define VIRQ_ARCH_1 17 #define VIRQ_ARCH_2 18 #define VIRQ_ARCH_3 19 #define VIRQ_ARCH_4 20 #define VIRQ_ARCH_5 21 #define VIRQ_ARCH_6 22 #define VIRQ_ARCH_7 23 #define NR_VIRQS 24 /* * MMU-UPDATE REQUESTS * * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * ptr[1:0] specifies the appropriate MMU_* command. * * ptr[1:0] == MMU_NORMAL_PT_UPDATE: * Updates an entry in a page table. If updating an L1 table, and the new * table entry is valid/present, the mapped frame must belong to the FD, if * an FD has been specified. If attempting to map an I/O page then the * caller assumes the privilege of the FD. * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller. * FD == DOMID_XEN: Map restricted areas of Xen's heap space. * ptr[:2] -- Machine address of the page-table entry to modify. * val -- Value to write. * * ptr[1:0] == MMU_MACHPHYS_UPDATE: * Updates an entry in the machine->pseudo-physical mapping table. * ptr[:2] -- Machine address within the frame whose mapping to modify. * The frame must belong to the FD, if one is specified. * val -- Value to write into the mapping entry. * * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD: * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed * with those in @val. */ #define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */ #define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */ #define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */ /* * MMU EXTENDED OPERATIONS * * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures. * A foreigndom (FD) can be specified (or DOMID_SELF for none). * Where the FD has some effect, it is described below. * * cmd: MMUEXT_(UN)PIN_*_TABLE * mfn: Machine frame number to be (un)pinned as a p.t. page. * The frame must belong to the FD, if one is specified. * * cmd: MMUEXT_NEW_BASEPTR * mfn: Machine frame number of new page-table base to install in MMU. * * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only] * mfn: Machine frame number of new page-table base to install in MMU * when in user space. * * cmd: MMUEXT_TLB_FLUSH_LOCAL * No additional arguments. Flushes local TLB. * * cmd: MMUEXT_INVLPG_LOCAL * linear_addr: Linear address to be flushed from the local TLB. * * cmd: MMUEXT_TLB_FLUSH_MULTI * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_INVLPG_MULTI * linear_addr: Linear address to be flushed. * vcpumask: Pointer to bitmap of VCPUs to be flushed. * * cmd: MMUEXT_TLB_FLUSH_ALL * No additional arguments. Flushes all VCPUs' TLBs. * * cmd: MMUEXT_INVLPG_ALL * linear_addr: Linear address to be flushed from all VCPUs' TLBs. 
* * cmd: MMUEXT_FLUSH_CACHE * No additional arguments. Writes back and flushes cache contents. * * cmd: MMUEXT_SET_LDT * linear_addr: Linear address of LDT base (NB. must be page-aligned). * nr_ents: Number of entries in LDT. */ #define MMUEXT_PIN_L1_TABLE 0 #define MMUEXT_PIN_L2_TABLE 1 #define MMUEXT_PIN_L3_TABLE 2 #define MMUEXT_PIN_L4_TABLE 3 #define MMUEXT_UNPIN_TABLE 4 #define MMUEXT_NEW_BASEPTR 5 #define MMUEXT_TLB_FLUSH_LOCAL 6 #define MMUEXT_INVLPG_LOCAL 7 #define MMUEXT_TLB_FLUSH_MULTI 8 #define MMUEXT_INVLPG_MULTI 9 #define MMUEXT_TLB_FLUSH_ALL 10 #define MMUEXT_INVLPG_ALL 11 #define MMUEXT_FLUSH_CACHE 12 #define MMUEXT_SET_LDT 13 #define MMUEXT_NEW_USER_BASEPTR 15 #ifndef __ASSEMBLY__ struct mmuext_op { unsigned int cmd; union { /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */ unsigned long mfn; /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */ unsigned long linear_addr; } arg1; union { /* SET_LDT */ unsigned int nr_ents; /* TLB_FLUSH_MULTI, INVLPG_MULTI */ void *vcpumask; } arg2; }; DEFINE_GUEST_HANDLE_STRUCT(mmuext_op); #endif /* These are passed as 'flags' to update_va_mapping. They can be ORed. */ /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */ /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */ #define UVMF_NONE (0UL<<0) /* No flushing at all. */ #define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */ #define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */ #define UVMF_FLUSHTYPE_MASK (3UL<<0) #define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */ #define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */ #define UVMF_ALL (1UL<<2) /* Flush all TLBs. */ /* * Commands to HYPERVISOR_console_io(). */ #define CONSOLEIO_write 0 #define CONSOLEIO_read 1 /* * Commands to HYPERVISOR_vm_assist(). */ #define VMASST_CMD_enable 0 #define VMASST_CMD_disable 1 #define VMASST_TYPE_4gb_segments 0 #define VMASST_TYPE_4gb_segments_notify 1 #define VMASST_TYPE_writable_pagetables 2 #define VMASST_TYPE_pae_extended_cr3 3 #define MAX_VMASST_TYPE 3 #ifndef __ASSEMBLY__ typedef uint16_t domid_t; /* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */ #define DOMID_FIRST_RESERVED (0x7FF0U) /* DOMID_SELF is used in certain contexts to refer to oneself. */ #define DOMID_SELF (0x7FF0U) /* * DOMID_IO is used to restrict page-table updates to mapping I/O memory. * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO * is useful to ensure that no mappings to the OS's own heap are accidentally * installed. (e.g., in Linux this could cause havoc as reference counts * aren't adjusted on the I/O-mapping code path). * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can * be specified by any calling domain. */ #define DOMID_IO (0x7FF1U) /* * DOMID_XEN is used to allow privileged domains to map restricted parts of * Xen's heap space (e.g., the machine_to_phys table). * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if * the caller is privileged. */ #define DOMID_XEN (0x7FF2U) /* * Send an array of these to HYPERVISOR_mmu_update(). * NB. The fields are natural pointer/address size for this architecture. */ struct mmu_update { uint64_t ptr; /* Machine address of PTE. */ uint64_t val; /* New contents of PTE. */ }; DEFINE_GUEST_HANDLE_STRUCT(mmu_update); /* * Send an array of these to HYPERVISOR_multicall(). * NB. The fields are natural register size for this architecture. 
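 *
 * Illustrative sketch (not part of the original header): a guest batches
 * several hypercalls into one trap by filling an array of these entries,
 * e.g. (argument values elided)
 *
 *   struct multicall_entry mc[2];
 *   mc[0].op = __HYPERVISOR_update_va_mapping;
 *   mc[1].op = __HYPERVISOR_fpu_taskswitch;
 *   ... fill mc[i].args[] ...
 *   HYPERVISOR_multicall(mc, 2);
 *
 * Xen writes each entry's result field on completion.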
*/ struct multicall_entry { unsigned long op; long result; unsigned long args[6]; }; DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); /* * Event channel endpoints per domain: * 1024 if a long is 32 bits; 4096 if a long is 64 bits. */ #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) struct vcpu_time_info { /* * Updates to the following values are preceded and followed * by an increment of 'version'. The guest can therefore * detect updates by looking for changes to 'version'. If the * least-significant bit of the version number is set then an * update is in progress and the guest must wait to read a * consistent set of values. The correct way to interact with * the version number is similar to Linux's seqlock: see the * implementations of read_seqbegin/read_seqretry. */ uint32_t version; uint32_t pad0; uint64_t tsc_timestamp; /* TSC at last update of time vals. */ uint64_t system_time; /* Time, in nanosecs, since boot. */ /* * Current system time: * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul * CPU frequency (Hz): * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift */ uint32_t tsc_to_system_mul; int8_t tsc_shift; int8_t pad1[3]; }; /* 32 bytes */ struct vcpu_info { /* * 'evtchn_upcall_pending' is written non-zero by Xen to indicate * a pending notification for a particular VCPU. It is then cleared * by the guest OS /before/ checking for pending work, thus avoiding * a set-and-check race. Note that the mask is only accessed by Xen * on the CPU that is currently hosting the VCPU. This means that the * pending and mask flags can be updated by the guest without special * synchronisation (i.e., no need for the x86 LOCK prefix). * This may seem suboptimal because if the pending flag is set by * a different CPU then an IPI may be scheduled even when the mask * is set. However, note: * 1. The task of 'interrupt holdoff' is covered by the per-event- * channel mask bits. A 'noisy' event that is continually being * triggered can be masked at source at this very precise * granularity. * 2. The main purpose of the per-VCPU mask is therefore to restrict * reentrant execution: whether for concurrency control, or to * prevent unbounded stack usage. Whatever the purpose, we expect * that the mask will be asserted only for short periods at a time, * and so the likelihood of a 'spurious' IPI is suitably small. * The mask is read before making an event upcall to the guest: a * non-zero mask therefore guarantees that the VCPU will not receive * an upcall activation. The mask is cleared when the VCPU requests * to block: this avoids wakeup-waiting races. */ uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_mask; unsigned long evtchn_pending_sel; struct arch_vcpu_info arch; struct pvclock_vcpu_time_info time; }; /* 64 bytes (x86) */ /* * Xen/kernel shared data -- pointer provided in start_info. * NB. We expect that this struct is smaller than a page. */ struct shared_info { struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; /* * A domain can create "event channels" on which it can send and receive * asynchronous event notifications. There are three classes of event that * are delivered by this mechanism: * 1. Bi-directional inter- and intra-domain connections. Domains must * arrange out-of-band to set up a connection (usually by allocating * an unbound 'listener' port and avertising that via a storage service * such as xenstore). * 2. Physical interrupts. 
A domain with suitable hardware-access * privileges can bind an event-channel port to a physical interrupt * source. * 3. Virtual interrupts ('events'). A domain can bind an event-channel * port to a virtual interrupt source, such as the virtual-timer * device or the emergency console. * * Event channels are addressed by a "port index". Each channel is * associated with two bits of information: * 1. PENDING -- notifies the domain that there is a pending notification * to be processed. This bit is cleared by the guest. * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING * will cause an asynchronous upcall to be scheduled. This bit is only * updated by the guest. It is read-only within Xen. If a channel * becomes pending while the channel is masked then the 'edge' is lost * (i.e., when the channel is unmasked, the guest must manually handle * pending notifications as no upcall will be scheduled by Xen). * * To expedite scanning of pending notifications, any 0->1 pending * transition on an unmasked channel causes a corresponding bit in a * per-vcpu selector word to be set. Each bit in the selector covers a * 'C long' in the PENDING bitfield array. */ unsigned long evtchn_pending[sizeof(unsigned long) * 8]; unsigned long evtchn_mask[sizeof(unsigned long) * 8]; /* * Wallclock time: updated only by control software. Guests should base * their gettimeofday() syscall on this wallclock-base value. */ struct pvclock_wall_clock wc; struct arch_shared_info arch; }; /* * Start-of-day memory layout for the initial domain (DOM0): * 1. The domain is started within contiguous virtual-memory region. * 2. The contiguous region begins and ends on an aligned 4MB boundary. * 3. The region start corresponds to the load address of the OS image. * If the load address is not 4MB aligned then the address is rounded down. * 4. This the order of bootstrap elements in the initial virtual region: * a. relocated kernel image * b. initial ram disk [mod_start, mod_len] * c. list of allocated page frames [mfn_list, nr_pages] * d. start_info_t structure [register ESI (x86)] * e. bootstrap page tables [pt_base, CR3 (x86)] * f. bootstrap stack [register ESP (x86)] * 5. Bootstrap elements are packed together, but each is 4kB-aligned. * 6. The initial ram disk may be omitted. * 7. The list of page frames forms a contiguous 'pseudo-physical' memory * layout for the domain. In particular, the bootstrap virtual-memory * region is a 1:1 mapping to the first section of the pseudo-physical map. * 8. All bootstrap elements are mapped read-writable for the guest OS. The * only exception is the bootstrap page table, which is mapped read-only. * 9. There is guaranteed to be at least 512kB padding after the final * bootstrap element. If necessary, the bootstrap virtual region is * extended by an extra 4MB to ensure this. */ #define MAX_GUEST_CMDLINE 1024 struct start_info { /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ char magic[32]; /* "xen--". */ unsigned long nr_pages; /* Total pages allocated to this domain. */ unsigned long shared_info; /* MACHINE address of shared info struct. */ uint32_t flags; /* SIF_xxx flags. */ unsigned long store_mfn; /* MACHINE page number of shared page. */ uint32_t store_evtchn; /* Event channel for store communication. */ union { struct { unsigned long mfn; /* MACHINE page number of console page. */ uint32_t evtchn; /* Event channel for console page. */ } domU; struct { uint32_t info_off; /* Offset of console_info struct. 
*/ uint32_t info_size; /* Size of console_info struct from start.*/ } dom0; } console; /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ unsigned long pt_base; /* VIRTUAL address of page directory. */ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ int8_t cmd_line[MAX_GUEST_CMDLINE]; }; /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ typedef uint64_t cpumap_t; typedef uint8_t xen_domain_handle_t[16]; /* Turn a plain number into a C unsigned long constant. */ #define __mk_unsigned_long(x) x ## UL #define mk_unsigned_long(x) __mk_unsigned_long(x) #else /* __ASSEMBLY__ */ /* In assembly code we cannot use C numeric constant suffixes. */ #define mk_unsigned_long(x) x #endif /* !__ASSEMBLY__ */ #endif /* __XEN_PUBLIC_XEN_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/interface/xencomm.h000066400000000000000000000032131314037446600315330ustar00rootroot00000000000000/* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Copyright (C) IBM Corp. 2006 */ #ifndef _XEN_XENCOMM_H_ #define _XEN_XENCOMM_H_ /* A xencomm descriptor is a scatter/gather list containing physical * addresses corresponding to a virtually contiguous memory area. The * hypervisor translates these physical addresses to machine addresses to copy * to and from the virtually contiguous area. 
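 *
 * Illustrative sketch (not part of the original header, simplified): a
 * descriptor covering a two-page buffer would be filled in roughly as
 *
 *   desc->magic      = XENCOMM_MAGIC;
 *   desc->nr_addrs   = 2;
 *   desc->address[0] = virt_to_phys(buf);
 *   desc->address[1] = virt_to_phys(buf + PAGE_SIZE);
 *
 * and the hypervisor walks address[] when copying to or from the area.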
*/ #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ #define XENCOMM_INVALID (~0UL) struct xencomm_desc { uint32_t magic; uint32_t nr_addrs; /* the number of entries in address[] */ uint64_t address[0]; }; #endif /* _XEN_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/page.h000066400000000000000000000002321314037446600270370ustar00rootroot00000000000000#ifndef _XEN_PAGE_H #define _XEN_PAGE_H #include extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size; #endif /* _XEN_PAGE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/platform_pci.h000066400000000000000000000032741314037446600306130ustar00rootroot00000000000000#ifndef _XEN_PLATFORM_PCI_H #define _XEN_PLATFORM_PCI_H #define XEN_IOPORT_MAGIC_VAL 0x49d2 #define XEN_IOPORT_LINUX_PRODNUM 0x0003 #define XEN_IOPORT_LINUX_DRVVER 0x0001 #define XEN_IOPORT_BASE 0x10 #define XEN_IOPORT_PLATFLAGS (XEN_IOPORT_BASE + 0) /* 1 byte access (R/W) */ #define XEN_IOPORT_MAGIC (XEN_IOPORT_BASE + 0) /* 2 byte access (R) */ #define XEN_IOPORT_UNPLUG (XEN_IOPORT_BASE + 0) /* 2 byte access (W) */ #define XEN_IOPORT_DRVVER (XEN_IOPORT_BASE + 0) /* 4 byte access (W) */ #define XEN_IOPORT_SYSLOG (XEN_IOPORT_BASE + 2) /* 1 byte access (W) */ #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ #define XEN_UNPLUG_ALL_IDE_DISKS (1<<0) #define XEN_UNPLUG_ALL_NICS (1<<1) #define XEN_UNPLUG_AUX_IDE_DISKS (1<<2) #define XEN_UNPLUG_ALL (XEN_UNPLUG_ALL_IDE_DISKS|\ XEN_UNPLUG_ALL_NICS|\ XEN_UNPLUG_AUX_IDE_DISKS) #define XEN_UNPLUG_UNNECESSARY (1<<16) #define XEN_UNPLUG_NEVER (1<<17) #define XEN_UNPLUG_IGNORE 8 static inline int xen_must_unplug_nics(void) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) return 1; #else return 0; #endif } static inline int xen_must_unplug_disks(void) { #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ (defined(CONFIG_XEN_PLATFORM_PCI) || \ defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) return 1; #else return 0; #endif } extern bool xen_platform_pci_enabled; extern int xen_platform_pci_unplug; #endif /* _XEN_PLATFORM_PCI_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/xen-ops.h000066400000000000000000000006641314037446600275250ustar00rootroot00000000000000#ifndef INCLUDE_XEN_OPS_H #define INCLUDE_XEN_OPS_H #include DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_pre_suspend(void); void xen_post_suspend(int suspend_cancelled); void xen_hvm_post_suspend(int suspend_cancelled); void xen_mm_pin_all(void); void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); int xen_setup_shutdown_event(void); #endif /* INCLUDE_XEN_OPS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/xen.h000066400000000000000000000017211314037446600267210ustar00rootroot00000000000000#ifndef _XEN_XEN_H #define _XEN_XEN_H enum xen_domain_type { XEN_NATIVE, /* running on bare hardware */ XEN_PV_DOMAIN, /* running in a PV domain */ XEN_HVM_DOMAIN, /* running in a Xen hvm domain */ }; #ifdef CONFIG_XEN extern enum xen_domain_type xen_domain_type; extern void xen_hvm_guest_init(void); extern uint32_t xen_cpuid_base(void); #else #define xen_domain_type XEN_NATIVE #define xen_hvm_guest_init() do {} while (0) #endif #define xen_domain() 
(xen_domain_type != XEN_NATIVE) #define xen_pv_domain() (xen_domain() && \ xen_domain_type == XEN_PV_DOMAIN) #define xen_hvm_domain() (xen_domain() && \ xen_domain_type == XEN_HVM_DOMAIN) #ifdef CONFIG_XEN_DOM0 #include #include #define xen_initial_domain() (xen_pv_domain() && \ xen_start_info->flags & SIF_INITDOMAIN) #else /* !CONFIG_XEN_DOM0 */ #define xen_initial_domain() (0) #endif /* CONFIG_XEN_DOM0 */ #endif /* _XEN_XEN_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/xenbus.h000066400000000000000000000204401314037446600274320ustar00rootroot00000000000000/****************************************************************************** * xenbus.h * * Talks to Xen Store to figure out what devices we have. * * Copyright (C) 2005 Rusty Russell, IBM Corporation * Copyright (C) 2005 XenSource Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef _XEN_XENBUS_H #define _XEN_XENBUS_H #include #include #include #include #include #include #include #include #include /* Register callback to watch this node. */ struct xenbus_watch { struct list_head list; /* Path being watched. */ const char *node; /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); }; /* A xenbus device. */ struct xenbus_device { const char *devicetype; const char *nodename; const char *otherend; int otherend_id; struct xenbus_watch otherend_watch; struct device dev; enum xenbus_state state; struct completion down; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) { return container_of(dev, struct xenbus_device, dev); } struct xenbus_device_id { /* .../device// */ char devicetype[32]; /* General class of device. */ }; /* A xenbus driver. 
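 *
 * Illustrative sketch (not part of the original header; the device type,
 * callback names and "mydev" prefix are placeholders): a frontend fills in
 * the callbacks below and registers itself with the helpers declared
 * further down, e.g.
 *
 *   static const struct xenbus_device_id mydev_ids[] = {
 *       { "vmydev" },
 *       { "" }
 *   };
 *   static DEFINE_XENBUS_DRIVER(mydev, ,
 *       .probe            = mydev_probe,
 *       .remove           = mydev_remove,
 *       .otherend_changed = mydev_backend_changed,
 *   );
 *   ...
 *   err = xenbus_register_frontend(&mydev_driver);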
*/ struct xenbus_driver { char *name; struct module *owner; const struct xenbus_device_id *ids; int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, enum xenbus_state backend_state); int (*remove)(struct xenbus_device *dev); int (*suspend)(struct xenbus_device *dev, pm_message_t state); int (*resume)(struct xenbus_device *dev); int (*uevent)(struct xenbus_device *, char **, int, char *, int); struct device_driver driver; int (*read_otherend_details)(struct xenbus_device *dev); int (*is_ready)(struct xenbus_device *dev); }; #define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \ struct xenbus_driver var ## _driver = { \ .driver.name = drvname + 0 ?: var ## _ids->devicetype, \ .driver.owner = THIS_MODULE, \ .ids = var ## _ids, ## methods \ } static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) { return container_of(drv, struct xenbus_driver, driver); } int __must_check __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_frontend(struct xenbus_driver *drv) { WARN_ON(drv->owner != THIS_MODULE); return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); } int __must_check __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner, const char *mod_name); static inline int __must_check xenbus_register_backend(struct xenbus_driver *drv) { WARN_ON(drv->owner != THIS_MODULE); return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); } void xenbus_unregister_driver(struct xenbus_driver *drv); struct xenbus_transaction { u32 id; }; /* Nil transaction ID. */ #define XBT_NIL ((struct xenbus_transaction) { 0 }) char **xenbus_directory(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *num); void *xenbus_read(struct xenbus_transaction t, const char *dir, const char *node, unsigned int *len); int xenbus_write(struct xenbus_transaction t, const char *dir, const char *node, const char *string); int xenbus_mkdir(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_exists(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node); int xenbus_transaction_start(struct xenbus_transaction *t); int xenbus_transaction_end(struct xenbus_transaction t, int abort); /* Single read and scanf: returns -errno or num scanned if > 0. */ int xenbus_scanf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(scanf, 4, 5))); /* Single printf and write: returns -errno or 0. */ int xenbus_printf(struct xenbus_transaction t, const char *dir, const char *node, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* Generic read function: NULL-terminated triples of name, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); /* notifer routines for when the xenstore comes up */ extern int xenstored_ready; int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb); int register_xenbus_watch(struct xenbus_watch *watch); void unregister_xenbus_watch(struct xenbus_watch *watch); void xs_suspend(void); void xs_resume(void); void xs_suspend_cancel(void); int xenbus_probe_init(void); /* Used by xenbus_dev to borrow kernel's store connection. 
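 *
 * Illustrative sketch of the transaction helpers declared above (not part
 * of the original header; "ring-ref" and ref are placeholders): a frontend
 * typically publishes its setup inside a transaction and retries on
 * conflict:
 *
 *   struct xenbus_transaction xbt;
 *   int err;
 * again:
 *   err = xenbus_transaction_start(&xbt);
 *   if (err)
 *       return err;
 *   err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ref);
 *   if (err) {
 *       xenbus_transaction_end(xbt, 1);
 *       return err;
 *   }
 *   err = xenbus_transaction_end(xbt, 0);
 *   if (err == -EAGAIN)
 *       goto again;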
*/ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg); struct work_struct; /* Prepare for domain suspend: then resume or cancel the suspend. */ void xenbus_suspend(void); void xenbus_resume(void); void xenbus_probe(struct work_struct *); void xenbus_suspend_cancel(void); #define XENBUS_IS_ERR_READ(str) ({ \ if (!IS_ERR(str) && strlen(str) == 0) { \ kfree(str); \ str = ERR_PTR(-ERANGE); \ } \ IS_ERR(str); \ }) #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE) int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int)); int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) __attribute__ ((format (printf, 4, 5))); int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr); int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, grant_handle_t *handle, void *vaddr); int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t handle, void *vaddr); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port); int xenbus_free_evtchn(struct xenbus_device *dev, int port); enum xenbus_state xenbus_read_driver_state(const char *path); void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); const char *xenbus_strstate(enum xenbus_state state); int xenbus_dev_is_online(struct xenbus_device *dev); int xenbus_frontend_closed(struct xenbus_device *dev); #endif /* _XEN_XENBUS_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/xencomm.h000066400000000000000000000050611314037446600275760ustar00rootroot00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) IBM Corp. 2006 * * Authors: Hollis Blanchard * Jerone Young */ #ifndef _LINUX_XENCOMM_H_ #define _LINUX_XENCOMM_H_ #include #define XENCOMM_MINI_ADDRS 3 struct xencomm_mini { struct xencomm_desc _desc; uint64_t address[XENCOMM_MINI_ADDRS]; }; /* To avoid additionnal virt to phys conversion, an opaque structure is presented. 
*/ struct xencomm_handle; extern void xencomm_free(struct xencomm_handle *desc); extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, struct xencomm_mini *xc_area); #if 0 #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ struct xencomm_mini xc_desc ## _base[(n)] \ __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ struct xencomm_mini *xc_desc = &xc_desc ## _base[0]; #else /* * gcc bug workaround: * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 * gcc doesn't handle properly stack variable with * __attribute__((__align__(sizeof(struct xencomm_mini)))) */ #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ unsigned char xc_desc ## _base[((n) + 1 ) * \ sizeof(struct xencomm_mini)]; \ struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ ((unsigned long)xc_desc ## _base + \ (sizeof(struct xencomm_mini) - \ ((unsigned long)xc_desc ## _base) % \ sizeof(struct xencomm_mini))); #endif #define xencomm_map_no_alloc(ptr, bytes) \ ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) /* provided by architecture code: */ extern unsigned long xencomm_vtop(unsigned long vaddr); static inline void *xencomm_pa(void *ptr) { return (void *)xencomm_vtop((unsigned long)ptr); } #define xen_guest_handle(hnd) ((hnd).p) #endif /* _LINUX_XENCOMM_H_ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/include/xen/xs_wire.h000066400000000000000000000040421314037446600276060ustar00rootroot00000000000000/* * Details of the "wire" protocol between Xen Store Daemon and client * library or guest kernel. * Copyright (C) 2005 Rusty Russell IBM Corporation */ #ifndef _XS_WIRE_H #define _XS_WIRE_H enum xsd_sockmsg_type { XS_DEBUG, XS_DIRECTORY, XS_READ, XS_GET_PERMS, XS_WATCH, XS_UNWATCH, XS_TRANSACTION_START, XS_TRANSACTION_END, XS_INTRODUCE, XS_RELEASE, XS_GET_DOMAIN_PATH, XS_WRITE, XS_MKDIR, XS_RM, XS_SET_PERMS, XS_WATCH_EVENT, XS_ERROR, XS_IS_DOMAIN_INTRODUCED, XS_LINUX_WEAK_WRITE=128 }; #define XS_WRITE_NONE "NONE" #define XS_WRITE_CREATE "CREATE" #define XS_WRITE_CREATE_EXCL "CREATE|EXCL" /* We hand errors as strings, for portability. */ struct xsd_errors { int errnum; const char *errstring; }; #define XSD_ERROR(x) { x, #x } static struct xsd_errors xsd_errors[] __attribute__((unused)) = { XSD_ERROR(EINVAL), XSD_ERROR(EACCES), XSD_ERROR(EEXIST), XSD_ERROR(EISDIR), XSD_ERROR(ENOENT), XSD_ERROR(ENOMEM), XSD_ERROR(ENOSPC), XSD_ERROR(EIO), XSD_ERROR(ENOTEMPTY), XSD_ERROR(ENOSYS), XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), XSD_ERROR(EISCONN) }; struct xsd_sockmsg { uint32_t type; /* XS_??? */ uint32_t req_id;/* Request identifier, echoed in daemon's response. */ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */ uint32_t len; /* Length of data following this. */ /* Generally followed by nul-terminated string(s). */ }; enum xs_watch_type { XS_WATCH_PATH = 0, XS_WATCH_TOKEN }; /* Inter-domain shared memory communications. */ #define XENSTORE_RING_SIZE 1024 typedef uint32_t XENSTORE_RING_IDX; #define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) struct xenstore_domain_interface { char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. 
*/ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; }; #endif /* _XS_WIRE_H */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/version.ini000066400000000000000000000000641314037446600257260ustar00rootroot00000000000000KernModeVersion=2.2.0.112 UserModeVersion=2.2.0.112 UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/000077500000000000000000000000001314037446600255445ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/Kbuild000066400000000000000000000001451314037446600267010ustar00rootroot00000000000000EXTRA_CFLAGS := -I$(M)/include/ obj-m := virtio.o virtio_ring.o virtio_pci.o obj-m += virtio_net.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/Makefile000066400000000000000000000000661314037446600272060ustar00rootroot00000000000000ifneq ($(KERNELRELEASE),) include $(src)/Kbuild endif UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/virtio.c000066400000000000000000000156031314037446600272310ustar00rootroot00000000000000#include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) /* Unique numbering for virtio devices. */ static DEFINE_IDA(virtio_index_ida); #endif static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) len += sprintf(buf+len, "%c", test_bit(i, dev->features) ? '1' : '0'); len += sprintf(buf+len, "\n"); return len; } static struct device_attribute virtio_dev_attrs[] = { __ATTR_RO(device), __ATTR_RO(vendor), __ATTR_RO(status), __ATTR_RO(modalias), __ATTR_RO(features), __ATTR_NULL }; static inline int virtio_id_match(const struct virtio_device *dev, const struct virtio_device_id *id) { if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) return 0; return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; } /* This looks through all the IDs a driver claims to support. If any of them * match, we return 1 and the kernel will call virtio_dev_probe(). 
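 *
 * Illustrative sketch (not part of the original source): a driver that
 * binds to the virtio-net device ID regardless of vendor declares a table
 * such as
 *
 *   static const struct virtio_device_id id_table[] = {
 *       { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *       { 0 },
 *   };
 *
 * and virtio_dev_match() below walks that table for each candidate device.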
*/ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; return 0; } static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); } static void add_status(struct virtio_device *dev, unsigned status) { dev->config->set_status(dev, dev->config->get_status(dev) | status); } void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); /* Features supported by both device and driver into dev->features. */ memset(dev->features, 0, sizeof(dev->features)); for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; BUG_ON(f >= 32); if (device_features & (1 << f)) set_bit(f, dev->features); } /* Transport features always preserved to pass to finalize_features. */ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) if (device_features & (1 << i)) set_bit(i, dev->features); dev->config->finalize_features(dev); err = drv->probe(dev); if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); else { add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); if (drv->scan) drv->scan(dev); } return err; } static int virtio_dev_remove(struct device *_d) { struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); /* Driver should have reset device. */ WARN_ON_ONCE(dev->config->get_status(dev)); /* Acknowledge the device's existence again. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); return 0; } static struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, .dev_attrs = virtio_dev_attrs, .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, }; int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. */ BUG_ON(driver->feature_table_size && !driver->feature_table); driver->driver.bus = &virtio_bus; return driver_register(&driver->driver); } EXPORT_SYMBOL_GPL(register_virtio_driver); void unregister_virtio_driver(struct virtio_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL_GPL(unregister_virtio_driver); #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,14) /* Unique numbering for virtio devices. */ static unsigned int dev_index; #endif int register_virtio_device(struct virtio_device *dev) { int err; dev->dev.bus = &virtio_bus; /* Assign a unique device index and hence name. 
*/ #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); if (err < 0) goto out; dev->index = err; #else dev->index = dev_index++; #endif dev_set_name(&dev->dev, "virtio%u", dev->index); /* We always start by resetting the device, in case a previous * driver messed it up. This also tests that code path a little. */ dev->config->reset(dev); /* Acknowledge that we've seen the device. */ add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); INIT_LIST_HEAD(&dev->vqs); /* device_register() causes the bus infrastructure to look for a * matching driver. */ err = device_register(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) out: #endif if (err) add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; } EXPORT_SYMBOL_GPL(register_virtio_device); void unregister_virtio_device(struct virtio_device *dev) { #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) int index = dev->index; /* save for after device release */ #endif device_unregister(&dev->dev); #if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13) ida_simple_remove(&virtio_index_ida, index); #endif } EXPORT_SYMBOL_GPL(unregister_virtio_device); static int virtio_init(void) { if (bus_register(&virtio_bus) != 0) panic("virtio bus registration failed"); return 0; } static void __exit virtio_exit(void) { bus_unregister(&virtio_bus); } core_initcall(virtio_init); module_exit(virtio_exit); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/virtio_net.c000066400000000000000000001460641314037446600301050ustar00rootroot00000000000000/* A network driver using virtio. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* device supports hardware address * change when it's running */ static unsigned int iff_live_addr_change = 1; static int napi_weight = 64; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; module_param(csum, bool, 0444); module_param(gso, bool, 0444); /* FIXME: MTU in config. 
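 * Until the MTU is negotiable, the small-buffer receive path uses a fixed
 * MAX_PACKET_LEN of ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN
 * (14 + 4 + 1500 bytes) per skb.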
*/ #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 #endif #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; u64 tx_bytes; u64 tx_packets; u64 rx_bytes; u64 rx_packets; }; /* Internal representation of a send virtqueue */ struct send_queue { /* Virtqueue associated with this send _queue */ struct virtqueue *vq; /* TX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of the send queue: output.$index */ char name[40]; }; /* Internal representation of a receive virtqueue */ struct receive_queue { /* Virtqueue associated with this receive_queue */ struct virtqueue *vq; struct napi_struct napi; /* Number of input buffers, and max we've ever had. */ unsigned int num, max; /* Chain pages by the private ptr. */ struct page *pages; /* RX: fragments + linear part + virtio header */ struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* Name of this receive queue: input.$index */ char name[40]; }; struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; struct net_device *dev; struct send_queue *sq; struct receive_queue *rq; unsigned int status; /* Max # of queue pairs supported by the device */ u16 max_queue_pairs; /* # of queue pairs currently used by the driver */ u16 curr_queue_pairs; /* I like... big packets and I cannot lie! */ bool big_packets; /* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; /* Has control virtqueue */ bool has_cvq; /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; /* enable config space updates */ bool config_enable; /* Active statistics */ struct virtnet_stats __percpu *stats; /* Work struct for refilling if we run low on memory. */ struct delayed_work refill; /* Work struct for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* Does the affinity hint is set for virtqueues? */ bool affinity_hint_set; /* Per-cpu variable to show the mapping from CPU to virtqueue */ int __percpu *vq_index; /* CPU hot plug notifier */ struct notifier_block nb; }; struct skb_vnet_hdr { union { struct virtio_net_hdr hdr; struct virtio_net_hdr_mrg_rxbuf mhdr; }; }; struct padded_vnet_hdr { struct virtio_net_hdr hdr; /* * virtio_net_hdr should be in a separated sg buffer because of a * QEMU bug, and data sg buffer shares same page with this header sg. * This padding makes next sg 16 byte aligned after virtio_net_hdr. */ char padding[6]; }; /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ static int vq2txq(struct virtqueue *vq) { return (vq->index - 1) / 2; } static int txq2vq(int txq) { return txq * 2 + 1; } static int vq2rxq(struct virtqueue *vq) { return vq->index / 2; } static int rxq2vq(int rxq) { return rxq * 2; } static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) { return (struct skb_vnet_hdr *)skb->cb; } /* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct receive_queue *rq, struct page *page) { struct page *end; /* Find end of list, sew whole thing into vi->rq.pages. 
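 * Pages are chained through page->private, with 0 terminating the chain,
 * so the loop below walks to the last page of the chain being returned and
 * splices the existing rq->pages list behind it before making the new
 * chain the list head.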
*/ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)rq->pages; rq->pages = page; } static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) { struct page *p = rq->pages; if (p) { rq->pages = (struct page *)p->private; /* clear private here, it is used to chain pages */ p->private = 0; } else p = alloc_page(gfp_mask); return p; } static void skb_xmit_done(struct virtqueue *vq) { struct virtnet_info *vi = vq->vdev->priv; /* Suppress further interrupts. */ virtqueue_disable_cb(vq); /* We were probably waiting for more output buffers. */ netif_wake_subqueue(vi->dev, vq2txq(vq)); } static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) { int size = min((unsigned)PAGE_SIZE - offset, *len); int i = skb_shinfo(skb)->nr_frags; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,13)) skb_fill_page_desc(skb, i, page, offset, size); #else __skb_fill_page_desc(skb, i, page, offset, size); skb_shinfo(skb)->nr_frags++; #endif skb->data_len += size; skb->len += size; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; #endif *len -= size; } /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct receive_queue *rq, struct page *page, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; p = page_address(page); /* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; hdr = skb_vnet_hdr(skb); if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } memcpy(hdr, p, hdr_len); len -= hdr_len; p += offset; copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); len -= copy; offset += copy; /* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. 
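 * At most MAX_SKB_FRAGS page fragments can be attached to the skb, so
 * anything longer than MAX_SKB_FRAGS * PAGE_SIZE after the linear copy is
 * dropped here instead of overflowing skb_shinfo(skb)->frags[].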
*/ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { net_dbg_ratelimited("%s: too much data\n", skb->dev->name); dev_kfree_skb(skb); return NULL; } while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } if (page) give_pages(rq, page); return skb; } static struct sk_buff *receive_small(void *buf, unsigned int len) { struct sk_buff * skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); return skb; } static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf) { struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, 0); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; } static struct sk_buff *receive_mergeable(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len) { struct skb_vnet_hdr *hdr = page_address(buf); int num_buf = hdr->mhdr.num_buffers; struct page *page = buf; struct sk_buff *skb = page_to_skb(rq, page, len); int i; if (unlikely(!skb)) goto err_skb; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long\n", skb->dev->name); skb->dev->stats.rx_length_errors++; goto err_frags; } page = virtqueue_get_buf(rq->vq, &len); if (!page) { pr_debug("%s: rx error: %d buffers %d missing\n", dev->name, hdr->mhdr.num_buffers, num_buf); dev->stats.rx_length_errors++; goto err_buf; } if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --rq->num; } return skb; err_skb: give_pages(rq, page); while (--num_buf) { err_frags: buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", dev->name, num_buf); dev->stats.rx_length_errors++; break; } page = buf; give_pages(rq, page); --rq->num; } err_buf: dev->stats.rx_dropped++; dev_kfree_skb(skb); return NULL; } static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) { struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct skb_vnet_hdr *hdr; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(rq, buf); else dev_kfree_skb(buf); return; } if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, rq, buf, len); else if (vi->big_packets) skb = receive_big(dev, rq, buf); else skb = receive_small(buf, len); if (unlikely(!skb)) return; hdr = skb_vnet_hdr(skb); skb->truesize += skb->data_len; u64_stats_update_begin(&stats->rx_syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { pr_debug("Needs csum!\n"); if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) goto frame_err; } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 
break; default: net_warn_ratelimited("%s: bad gso type %u.\n", dev->name, hdr->hdr.gso_type); goto frame_err; } if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; if (skb_shinfo(skb)->gso_size == 0) { net_warn_ratelimited("%s: zero gso size.\n", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); } static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; struct sk_buff *skb; struct skb_vnet_hdr *hdr; int err; #if (LINUX_VERSION_CODE > KERNEL_VERSION(3,0,13)) skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); #else skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); #endif if (unlikely(!skb)) return -ENOMEM; skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); return err; } static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) { struct page *first, *list = NULL; char *p; int i, err, offset; /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); if (!first) { if (list) give_pages(rq, list); return -ENOMEM; } sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); /* chain new page in list head to match sg */ first->private = (unsigned long)list; list = first; } first = get_a_page(rq, gfp); if (!first) { give_pages(rq, list); return -ENOMEM; } p = page_address(first); /* rq->sg[0], rq->sg[1] share the same page */ /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); /* rq->sg[1] for data packet, from offset */ offset = sizeof(struct padded_vnet_hdr); sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); /* chain first in list head */ first->private = (unsigned long)list; err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, first, gfp); if (err < 0) give_pages(rq, first); return err; } static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) { struct page *page; int err; page = get_a_page(rq, gfp); if (!page) return -ENOMEM; sg_init_one(rq->sg, page_address(page), PAGE_SIZE); err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); return err; } /* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) { struct virtnet_info *vi = rq->vq->vdev->priv; int err; bool oom; do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(rq, gfp); else if (vi->big_packets) err = add_recvbuf_big(rq, gfp); else err = add_recvbuf_small(rq, gfp); oom = err == -ENOMEM; if (err) break; ++rq->num; } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; virtqueue_kick(rq->vq); return !oom; } static void skb_recv_done(struct virtqueue *rvq) { struct virtnet_info *vi = rvq->vdev->priv; struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; /* Schedule NAPI, Suppress further interrupts if successful. 
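 * napi_schedule_prep() only succeeds when this NAPI context is not already
 * scheduled, so the callback is disabled at most once per poll cycle and is
 * re-armed from virtnet_poll() once the budget is no longer exhausted.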
*/ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rvq); __napi_schedule(&rq->napi); } } static void virtnet_napi_enable(struct receive_queue *rq) { napi_enable(&rq->napi); /* If all buffers were filled by other side before we napi_enabled, we * won't get another interrupt, so process any outstanding packets * now. virtnet_poll wants re-enable the queue, so we disable here. * We synchronize against interrupts via NAPI_STATE_SCHED */ if (napi_schedule_prep(&rq->napi)) { virtqueue_disable_cb(rq->vq); local_bh_disable(); __napi_schedule(&rq->napi); local_bh_enable(); } } static void refill_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, refill.work); bool still_empty; int i; for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); still_empty = !try_fill_recv(rq, GFP_KERNEL); virtnet_napi_enable(rq); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) schedule_delayed_work(&vi->refill, HZ/2); } } static int virtnet_poll(struct napi_struct *napi, int budget) { struct receive_queue *rq = container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; void *buf; unsigned int r, len, received = 0; again: while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { receive_buf(rq, buf, len); --rq->num; received++; } if (rq->num < rq->max / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) schedule_delayed_work(&vi->refill, 0); } /* Out of packets? */ if (received < budget) { r = virtqueue_enable_cb_prepare(rq->vq); napi_complete(napi); if (unlikely(virtqueue_poll(rq->vq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(rq->vq); __napi_schedule(napi); goto again; } } return received; } static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } return 0; } static void free_old_xmit_skbs(struct send_queue *sq) { struct sk_buff *skb; unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); u64_stats_update_begin(&stats->tx_syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->tx_syncp); dev_kfree_skb_any(skb); } } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; unsigned num_sg; unsigned hdr_len; bool can_push; pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); if (vi->mergeable_rx_bufs) hdr_len = sizeof hdr->mhdr; else hdr_len = sizeof hdr->hdr; can_push = vi->any_header_sg && !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; /* Even if we can, don't push here yet as this would skew * csum_start offset below. 
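 * With VIRTIO_F_ANY_LAYOUT and enough headroom the virtio header is later
 * pushed into the skb's linear data and travels in the same s/g element as
 * the packet; otherwise it stays in skb->cb and is added as a separate
 * descriptor in front of the data.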
*/ if (can_push) hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); else hdr = skb_vnet_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->hdr.csum_start = skb_checksum_start_offset(skb); hdr->hdr.csum_offset = skb->csum_offset; } else { hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; } if (skb_is_gso(skb)) { hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else BUG(); if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else { hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; } if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); /* Pull header back to avoid skew in tx bytes calculations. */ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int qnum = skb_get_queue_mapping(skb); struct send_queue *sq = &vi->sq[qnum]; int err; /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(sq); /* Try to transmit */ err = xmit_skb(sq, skb); /* This should not happen! */ if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(sq); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); virtqueue_disable_cb(sq->vq); } } } return NETDEV_TX_OK; } /* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out, struct scatterlist *in) { struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; /* Add header */ sg_init_one(&hdr, &ctrl, sizeof(ctrl)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; if (in) sgs[out_num + in_num++] = in; /* Add return status. 
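 * The request placed on the control vq is: header (class/cmd), optional
 * "out" data, optional "in" data, and finally this one-byte ack that the
 * device writes; sgs[] is filled in that order, with out_num readable and
 * in_num writable descriptors passed to virtqueue_add_sgs().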
*/ sg_init_one(&stat, &status, sizeof(status)); sgs[out_num + in_num++] = &stat; BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) < 0); virtqueue_kick(vi->cvq); /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ while (!virtqueue_get_buf(vi->cvq, &tmp) && !virtqueue_is_broken(vi->cvq)) cpu_relax(); return status == VIRTIO_NET_OK; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) /** * eth_prepare_mac_addr_change - prepare for mac change * @dev: network device * @p: socket address */ static int eth_prepare_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!iff_live_addr_change && netif_running(dev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; return 0; } /** * eth_commit_mac_addr_change - commit mac change * @dev: network device * @p: socket address */ static void eth_commit_mac_addr_change(struct net_device *dev, void *p) { struct sockaddr *addr = p; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } #endif static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; struct sockaddr *addr = p; struct scatterlist sg; ret = eth_prepare_mac_addr_change(dev, p); if (ret) return ret; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; } } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), addr->sa_data, dev->addr_len); } eth_commit_mac_addr_change(dev, p); return 0; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) static struct net_device_stats *virtnet_stats(struct net_device *dev) { int cpu; unsigned int start; struct virtnet_info *vi = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; stats->rx_packets = 0; stats->tx_packets = 0; stats->rx_bytes = 0; stats->tx_bytes = 0; for_each_possible_cpu(cpu) { struct virtnet_stats *stats_tmp = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats_tmp->tx_syncp); tpackets = stats_tmp->tx_packets; tbytes = stats_tmp->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats_tmp->rx_syncp); rpackets = stats_tmp->rx_packets; rbytes = stats_tmp->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats_tmp->rx_syncp, start)); stats->rx_packets += rpackets; stats->tx_packets += tpackets; stats->rx_bytes += rbytes; stats->tx_bytes += tbytes; } return stats; } #else static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct virtnet_info *vi = netdev_priv(dev); int cpu; unsigned int start; for_each_possible_cpu(cpu) { struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); u64 tpackets, tbytes, rpackets, rbytes; do { start = u64_stats_fetch_begin_bh(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); do { start = u64_stats_fetch_begin_bh(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); 
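/* Fold this CPU's consistent snapshot into the device-wide totals. */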
tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; return tot; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void virtnet_netpoll(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; for (i = 0; i < vi->curr_queue_pairs; i++) napi_schedule(&vi->rq[i].napi); } #endif static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; s.virtqueue_pairs = queue_pairs; sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; } else { vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. */ if (dev->flags & IFF_UP) schedule_delayed_work(&vi->refill, 0); } return 0; } static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); int i; /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); return 0; } #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) static int virtnet_set_tx_csum(struct net_device *dev, u32 data) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) return -ENOSYS; return ethtool_op_set_tx_hw_csum(dev, data); } #endif static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) struct dev_addr_list *addr; #endif struct netdev_hw_addr *ha; int uc_count; int mc_count; void *buf; int i; /* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); sg_init_one(sg, &promisc, sizeof(promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); sg_init_one(sg, &allmulti, sizeof(allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); /* MAC filter - use one buffer for both lists */ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; if (!buf) return; sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ mac_data->entries = uc_count; i = 0; netdev_for_each_uc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); /* multicast list and count fill the end */ mac_data = (void *)&mac_data->macs[uc_count][0]; mac_data->entries = mc_count; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) addr = dev->mc_list; for (i = 0; i < dev->mc_count; i++, addr = addr->next) memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN); #else i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); #endif sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,24)) static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) #else static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) #endif { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) return 0; #endif } static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) { int i; int cpu; if (vi->affinity_hint_set) { for (i = 0; i < vi->max_queue_pairs; i++) { virtqueue_set_affinity(vi->rq[i].vq, -1); virtqueue_set_affinity(vi->sq[i].vq, -1); } vi->affinity_hint_set = false; } i = 0; for_each_online_cpu(cpu) { if (cpu == hcpu) { *per_cpu_ptr(vi->vq_index, cpu) = -1; } else { *per_cpu_ptr(vi->vq_index, cpu) = ++i % vi->curr_queue_pairs; } } } static void virtnet_set_affinity(struct virtnet_info *vi) { int i; int cpu; /* In multiqueue mode, when the number of cpu is equal to the number of * queue pairs, we let the queue pairs to be private to one cpu by * setting the affinity hint to eliminate the contention. 
*/ if (vi->curr_queue_pairs == 1 || vi->max_queue_pairs != num_online_cpus()) { virtnet_clean_affinity(vi, -1); return; } i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); *per_cpu_ptr(vi->vq_index, cpu) = i; i++; } vi->affinity_hint_set = true; } static int virtnet_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); switch(action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DOWN_FAILED: case CPU_DEAD: virtnet_set_affinity(vi); break; case CPU_DOWN_PREPARE: virtnet_clean_affinity(vi, (long)hcpu); break; default: break; } return NOTIFY_OK; } static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)) /* TODO: Eliminate OOO packets during switching */ static int virtnet_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); u16 queue_pairs = channels->combined_count; int err; /* We don't support separate rx/tx channels. * We don't allow setting 'other' channels. */ if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; if (queue_pairs > vi->max_queue_pairs) return -EINVAL; get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, queue_pairs); netif_set_real_num_rx_queues(dev, queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); return err; } static void virtnet_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct virtnet_info *vi = netdev_priv(dev); channels->combined_count = vi->curr_queue_pairs; channels->max_combined = vi->max_queue_pairs; channels->max_other = 0; channels->rx_count = 0; channels->tx_count = 0; channels->other_count = 0; } #endif static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = virtnet_get_ringparam, #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) .set_tx_csum = virtnet_set_tx_csum, .set_sg = ethtool_op_set_sg, .set_tso = ethtool_op_set_tso, .set_ufo = ethtool_op_set_ufo, #else .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, #endif }; #define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } #if 0 /* To avoid contending a lock hold by a vcpu who would exit to host, select the * txq based on the processor id. 
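 * This ndo_select_queue implementation is currently compiled out (#if 0),
 * so the core networking stack's default tx queue selection is used
 * instead; the per-CPU vq_index kept by the affinity code would be the
 * fallback mapping if it were re-enabled.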
*/ static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) { int txq; struct virtnet_info *vi = netdev_priv(dev); if (skb_rx_queue_recorded(skb)) { txq = skb_get_rx_queue(skb); } else { txq = *this_cpu_ptr(vi->vq_index); if (txq == -1) txq = 0; } while (unlikely(txq >= dev->real_num_tx_queues)) txq -= dev->real_num_tx_queues; return txq; } #endif static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = virtnet_set_mac_address, .ndo_set_rx_mode = virtnet_set_rx_mode, .ndo_change_mtu = virtnet_change_mtu, #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) .ndo_get_stats = virtnet_stats, #else .ndo_get_stats64 = virtnet_stats, #endif .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, #if 0 .ndo_select_queue = virtnet_select_queue, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,32)) /* from higher version kernel code. For redhat*/ static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) { struct in_ifaddr *ifa; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { arp_send(ARPOP_REQUEST, ETH_P_ARP, ifa->ifa_local, dev, ifa->ifa_local, NULL, dev->dev_addr, NULL); } } /** * netif_notify_peers - notify network peers about existence of @dev * @dev: network device * * Generate traffic such that interested network peers are aware of * @dev, such as by generating a gratuitous ARP. This may be used when * a device wants to inform the rest of the network about some sort of * reconfiguration such as a failover event or virtual machine * migration. * For redhat. 
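 * This is a local backport for kernels (<= 2.6.32) that lack
 * netif_notify_peers(): it simply sends a gratuitous ARP for every IPv4
 * address configured on the device.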
*/ void netif_notify_peers(struct net_device *dev) { struct in_device *in_dev; rtnl_lock(); in_dev = __in_dev_get_rtnl(dev); if(in_dev) inetdev_send_gratuitous_arp(dev, in_dev); rtnl_unlock(); } #endif static void virtnet_config_changed_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, config_work); u16 v; mutex_lock(&vi->config_lock); if (!vi->config_enable) goto done; if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, offsetof(struct virtio_net_config, status), &v) < 0) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) netif_notify_peers(vi->dev); #else netdev_notify_peers(vi->dev); #endif virtnet_ack_link_announce(vi); } /* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) goto done; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { netif_carrier_off(vi->dev); netif_tx_stop_all_queues(vi->dev); } done: mutex_unlock(&vi->config_lock); } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; schedule_work(&vi->config_work); } static void virtnet_free_queues(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) netif_napi_del(&vi->rq[i].napi); kfree(vi->rq); kfree(vi->sq); } static void free_receive_bufs(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) { while (vi->rq[i].pages) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); } } static void free_unused_bufs(struct virtnet_info *vi) { void *buf; int i; for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) dev_kfree_skb(buf); } for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->rq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(&vi->rq[i], buf); else dev_kfree_skb(buf); --vi->rq[i].num; } BUG_ON(vi->rq[i].num != 0); } } static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; virtnet_clean_affinity(vi, -1); vdev->config->del_vqs(vdev); virtnet_free_queues(vi); } static int virtnet_find_vqs(struct virtnet_info *vi) { vq_callback_t **callbacks; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; const char **names; /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by * possible control vq. 
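 * The resulting index layout matches the vq2rxq()/vq2txq() helpers above:
 *
 *   vq 0 -> rx0, vq 1 -> tx0, vq 2 -> rx1, vq 3 -> tx1, ...,
 *   and the control vq (when VIRTIO_NET_F_CTRL_VQ is set) is the last one.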
*/ total_vqs = vi->max_queue_pairs * 2 + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); /* Allocate space for find_vqs parameters */ vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); if (!vqs) goto err_vq; callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); if (!callbacks) goto err_callback; names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; /* Parameters for control virtqueue, if any */ if (vi->has_cvq) { callbacks[total_vqs - 1] = NULL; names[total_vqs - 1] = "control"; } /* Allocate/initialize parameters for send/receive virtqueues */ for (i = 0; i < vi->max_queue_pairs; i++) { callbacks[rxq2vq(i)] = skb_recv_done; callbacks[txq2vq(i)] = skb_xmit_done; sprintf(vi->rq[i].name, "input.%d", i); sprintf(vi->sq[i].name, "output.%d", i); names[rxq2vq(i)] = vi->rq[i].name; names[txq2vq(i)] = vi->sq[i].name; } ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, names); if (ret) goto err_find; if (vi->has_cvq) { vi->cvq = vqs[total_vqs - 1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,17)) vi->dev->features |= NETIF_F_HW_VLAN_FILTER; #else vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; #endif } for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].vq = vqs[rxq2vq(i)]; vi->sq[i].vq = vqs[txq2vq(i)]; } kfree(names); kfree(callbacks); kfree(vqs); return 0; err_find: kfree(names); err_names: kfree(callbacks); err_callback: kfree(vqs); err_vq: return ret; } static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->rq) goto err_rq; INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } return 0; err_rq: kfree(vi->sq); err_sq: return -ENOMEM; } static int init_vqs(struct virtnet_info *vi) { int ret; /* Allocate send & receive queues */ ret = virtnet_alloc_queues(vi); if (ret) goto err; ret = virtnet_find_vqs(vi); if (ret) goto err_free; get_online_cpus(); virtnet_set_affinity(vi); put_online_cpus(); return 0; err_free: virtnet_free_queues(vi); err: return ret; } static int virtnet_probe(struct virtio_device *vdev) { int i, err; struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; /* Find if host supports multiqueue virtio_net device */ err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, offsetof(struct virtio_net_config, max_virtqueue_pairs), &max_queue_pairs); /* We need at least 2 queue's */ if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) max_queue_pairs = 1; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); if (!dev) return -ENOMEM; /* Set up network device as normal. 
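 * priv_flags, netdev_ops, the ethtool ops and the offload feature bits are
 * all filled in here before register_netdev() is called further down; the
 * kernel-version checks only gate flags that older kernels do not define.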
*/ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,17)) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,24)) dev->priv_flags |= IFF_UNICAST_FLT; #endif dev->netdev_ops = &virtnet_netdev; dev->features = NETIF_F_HIGHDMA; SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. */ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->features |= NETIF_F_TSO_ECN; dev->features |= NETIF_F_GSO_ROBUST; #else dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & NETIF_F_ALL_TSO; #endif /* (!csum && gso) case will be fixed by register_netdev() */ } dev->vlan_features = dev->features; /* Configuration may specify what MAC to use. Otherwise random. */ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len) < 0) eth_hw_addr_random(dev); /* Set up our device-specific information */ vi = netdev_priv(dev); vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; vi->vq_index = alloc_percpu(int); if (vi->vq_index == NULL) goto free_stats; mutex_init(&vi->config_lock); vi->config_enable = true; INIT_WORK(&vi->config_work, virtnet_config_changed_work); /* If we can receive ANY GSO packets, we must allocate large ones. 
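 * i.e. if the host may hand us TSO4/TSO6/ECN/UFO frames, big_packets mode
 * is enabled and receive buffers are built from chains of pages instead of
 * single MAX_PACKET_LEN skbs.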
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) { vi->any_header_sg = true; if (vi->mergeable_rx_bufs) dev->needed_headroom = sizeof (struct virtio_net_hdr_mrg_rxbuf); else dev->needed_headroom = sizeof (struct virtio_net_hdr); } /* Use single tx/rx queue pair as default */ vi->curr_queue_pairs = 1; vi->max_queue_pairs = max_queue_pairs; /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ err = init_vqs(vi); if (err) goto free_index; get_online_cpus(); err = virtnet_set_queues(vi, max_queue_pairs); if (!err) { netif_set_real_num_tx_queues(dev, max_queue_pairs); netif_set_real_num_rx_queues(dev, max_queue_pairs); virtnet_set_affinity(vi); } put_online_cpus(); err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); goto free_vqs; } /* Last of all, set up some receive buffers. */ for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. */ if (vi->rq[i].num == 0) { free_unused_bufs(vi); err = -ENOMEM; goto free_recv_bufs; } } vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_recv_bufs; } /* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); return 0; free_recv_bufs: free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); virtnet_del_vqs(vi); free_index: free_percpu(vi->vq_index); free_stats: free_percpu(vi->stats); free: free_netdev(dev); return err; } static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); /* Free unused buffers in both send and recv, if any. */ free_unused_bufs(vi); free_receive_bufs(vi); virtnet_del_vqs(vi); } static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device. 
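 * config_enable is cleared under config_lock so a config-change work item
 * that is already running bails out before touching the device, and the
 * work is flushed again after the virtqueues have been torn down.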
*/ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); unregister_netdev(vi->dev); remove_vq_common(vi); flush_work(&vi->config_work); free_percpu(vi->vq_index); free_percpu(vi->stats); free_netdev(vi->dev); } #ifdef CONFIG_PM static int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int i; unregister_hotcpu_notifier(&vi->nb); /* Prevent config work handler from accessing the device */ mutex_lock(&vi->config_lock); vi->config_enable = false; mutex_unlock(&vi->config_lock); netif_device_detach(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); } remove_vq_common(vi); flush_work(&vi->config_work); return 0; } static int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err, i; err = init_vqs(vi); if (err) return err; if (netif_running(vi->dev)) { for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); for (i = 0; i < vi->max_queue_pairs; i++) virtnet_napi_enable(&vi->rq[i]); } netif_device_attach(vi->dev); mutex_lock(&vi->config_lock); vi->config_enable = true; mutex_unlock(&vi->config_lock); rtnl_lock(); virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); err = register_hotcpu_notifier(&vi->nb); if (err) return err; return 0; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, VIRTIO_F_ANY_LAYOUT, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, #ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif }; module_virtio_driver(virtio_net_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/virtio_pci.c000066400000000000000000000564271314037446600300750ustar00rootroot00000000000000/* * Virtio PCI driver * * This module allows virtio devices to be used over a virtual PCI device. * This can be used with QEMU based VMMs like KVM or Xen. * * Copyright IBM Corp. 2007 * * Authors: * Anthony Liguori * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
* */ #include #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Anthony Liguori "); MODULE_DESCRIPTION("virtio-pci"); MODULE_LICENSE("GPL"); MODULE_VERSION("1"); /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; /* the IO mapping for the PCI config space */ void __iomem *ioaddr; /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; /* MSI-X support */ int msix_enabled; int intx_enabled; struct msix_entry *msix_entries; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; /* Number of available vectors */ unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; /* Status saved during hibernate/restore */ u8 saved_status; /* Whether we have vector per vq */ bool per_vq_vectors; }; /* Constants for MSI-X */ /* Use first vector for configuration changes, second and the rest for * virtqueues Thus, we need at least 2 vectors for MSI. */ enum { VP_MSIX_CONFIG_VECTOR = 0, VP_MSIX_VQ_VECTOR = 1, }; struct virtio_pci_vq_info { /* the actual virtqueue */ struct virtqueue *vq; /* the number of entries in the queue */ int num; /* the virtual address of the ring queue */ void *queue; /* the list node for the virtqueues list */ struct list_head node; /* MSI-X vector (or none) */ unsigned msix_vector; }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_pci_device, vdev); } /* virtio config->get_features() implementation */ static u32 vp_get_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* When someone needs more than 32 feature bits, we'll need to * steal a bit to indicate that the rest are somewhere else. */ return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); } /* virtio config->finalize_features() implementation */ static void vp_finalize_features(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); /* We only support 32 feature bits. */ BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); } /* virtio config->get() implementation */ static void vp_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; u8 *ptr = buf; int i; for (i = 0; i < len; i++) ptr[i] = ioread8(ioaddr + i); } /* the config->set() implementation. 
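 * Like vp_get() above it moves one byte at a time through the legacy I/O
 * BAR at VIRTIO_PCI_CONFIG(vp_dev) + offset, which keeps the access valid
 * for any field size or alignment.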
it's symmetric to the config->get() * implementation */ static void vp_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG(vp_dev) + offset; const u8 *ptr = buf; int i; for (i = 0; i < len; i++) iowrite8(ptr[i], ioaddr + i); } /* config->{get,set}_status() implementations */ static u8 vp_get_status(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); } static void vp_set_status(struct virtio_device *vdev, u8 status) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* We should never be setting status to 0. */ BUG_ON(status == 0); iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } /* wait for pending irq handlers */ static void vp_synchronize_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) synchronize_irq(vp_dev->pci_dev->irq); for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(vp_dev->msix_entries[i].vector); } static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush out the status write, and flush in device writes, * including MSi-X interrupts, if any. */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); } /* the notify function used when creating a virt queue */ static void vp_notify(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); /* we write the queue's selector into the notification register to * signal the other end */ iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. */ static irqreturn_t vp_config_changed(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_driver *drv; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vp_dev->vdev); return IRQ_HANDLED; } /* Notify all virtqueues on an interrupt. */ static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; unsigned long flags; spin_lock_irqsave(&vp_dev->lock, flags); list_for_each_entry(info, &vp_dev->virtqueues, node) { if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } /* A small wrapper to also acknowledge the interrupt when it's handled. * I really need an EIO hook for the vring so I can ack the interrupt once we * know that we'll be handling the IRQ but before we invoke the callback since * the callback may notify the host which results in the host attempting to * raise an interrupt that we would then mask once we acknowledged the * interrupt. */ static irqreturn_t vp_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; u8 isr; /* reading the ISR has the effect of also clearing it so it's very * important to save off the value. */ isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); /* It's definitely not us if the ISR was not high */ if (!isr) return IRQ_NONE; /* Configuration change? Tell driver if it wants to know. 
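 * The ISR read above has already cleared the register, so both the config
 * bit and any pending queue interrupts must be handled from this one pass.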
*/ if (isr & VIRTIO_PCI_ISR_CONFIG) vp_config_changed(irq, opaque); return vp_vring_interrupt(irq, opaque); } static void vp_free_vectors(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; if (vp_dev->intx_enabled) { free_irq(vp_dev->pci_dev->irq, vp_dev); vp_dev->intx_enabled = 0; } for (i = 0; i < vp_dev->msix_used_vectors; ++i) free_irq(vp_dev->msix_entries[i].vector, vp_dev); for (i = 0; i < vp_dev->msix_vectors; i++) if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Flush the write out to device */ ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); pci_disable_msix(vp_dev->pci_dev); vp_dev->msix_enabled = 0; vp_dev->msix_vectors = 0; } vp_dev->msix_used_vectors = 0; kfree(vp_dev->msix_names); vp_dev->msix_names = NULL; kfree(vp_dev->msix_entries); vp_dev->msix_entries = NULL; kfree(vp_dev->msix_affinity_masks); vp_dev->msix_affinity_masks = NULL; } static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); unsigned i, v; int err = -ENOMEM; vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, GFP_KERNEL); if (!vp_dev->msix_entries) goto error; vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, GFP_KERNEL); if (!vp_dev->msix_names) goto error; vp_dev->msix_affinity_masks = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, GFP_KERNEL); if (!vp_dev->msix_affinity_masks) goto error; for (i = 0; i < nvectors; ++i) if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], GFP_KERNEL)) goto error; for (i = 0; i < nvectors; ++i) vp_dev->msix_entries[i].entry = i; /* pci_enable_msix returns positive if we can't get this many. 
*/ err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); if (err > 0) err = -ENOSPC; if (err) goto error; vp_dev->msix_vectors = nvectors; vp_dev->msix_enabled = 1; /* Set the vector used for configuration */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-config", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_config_changed, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); /* Verify we had enough resources to assign the vector */ v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); if (v == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto error; } if (!per_vq_vectors) { /* Shared vector for all VQs */ v = vp_dev->msix_used_vectors; snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, "%s-virtqueues", name); err = request_irq(vp_dev->msix_entries[v].vector, vp_vring_interrupt, 0, vp_dev->msix_names[v], vp_dev); if (err) goto error; ++vp_dev->msix_used_vectors; } return 0; error: vp_free_vectors(vdev); return err; } static int vp_request_intx(struct virtio_device *vdev) { int err; struct virtio_pci_device *vp_dev = to_vp_device(vdev); err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (!err) vp_dev->intx_enabled = 1; return err; } static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info; struct virtqueue *vq; unsigned long flags, size; u16 num; int err; /* Select the queue we're interested in */ iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); /* Check if queue is either not available or already active. 
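 *
 * Added note on legacy virtio-PCI queue discovery: after the queue index is
 * written to VIRTIO_PCI_QUEUE_SEL, VIRTIO_PCI_QUEUE_NUM returns the ring size
 * the device supports for that queue (0 means the queue does not exist), and
 * VIRTIO_PCI_QUEUE_PFN reads back nonzero if a ring has already been placed
 * there, i.e. the queue is already active.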
*/ num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); /* allocate and fill out our structure the represents an active * queue */ info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); info->num = num; info->msix_vector = msix_vec; size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); if (info->queue == NULL) { err = -ENOMEM; goto out_info; } /* activate the queue */ iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, true, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; } vq->priv = info; info->vq = vq; if (msix_vec != VIRTIO_MSI_NO_VECTOR) { iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); if (msix_vec == VIRTIO_MSI_NO_VECTOR) { err = -EBUSY; goto out_assign; } } if (callback) { spin_lock_irqsave(&vp_dev->lock, flags); list_add(&info->node, &vp_dev->virtqueues); spin_unlock_irqrestore(&vp_dev->lock, flags); } else { INIT_LIST_HEAD(&info->node); } return vq; out_assign: vring_del_virtqueue(vq); out_activate_queue: iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); free_pages_exact(info->queue, size); out_info: kfree(info); return ERR_PTR(err); } static void vp_del_vq(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); struct virtio_pci_vq_info *info = vq->priv; unsigned long flags, size; spin_lock_irqsave(&vp_dev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); } vring_del_virtqueue(vq); /* Select and deactivate the queue */ iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); free_pages_exact(info->queue, size); kfree(info); } /* the config->del_vqs() implementation */ static void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq, *n; struct virtio_pci_vq_info *info; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { info = vq->priv; if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR) free_irq(vp_dev->msix_entries[info->msix_vector].vector, vq); vp_del_vq(vq); } vp_dev->per_vq_vectors = false; vp_free_vectors(vdev); } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); u16 msix_vec; int i, err, nvectors, allocated_vectors; if (!use_msix) { /* Old style: one normal interrupt for change and all vqs. */ err = vp_request_intx(vdev); if (err) goto error_request; } else { if (per_vq_vectors) { /* Best option: one for change interrupt, one per vq. */ nvectors = 1; for (i = 0; i < nvqs; ++i) if (callbacks[i]) ++nvectors; } else { /* Second best: one for change, shared for all vqs. 
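 *
 * Added note: the overall strategy, tried in this order by vp_find_vqs(), is
 * one MSI-X vector per virtqueue plus one for configuration changes, then two
 * shared MSI-X vectors (config plus all queues), and finally a single legacy
 * INTx interrupt for everything.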
*/ nvectors = 2; } err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); if (err) goto error_request; } vp_dev->per_vq_vectors = per_vq_vectors; allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } else if (!callbacks[i] || !vp_dev->msix_enabled) msix_vec = VIRTIO_MSI_NO_VECTOR; else if (vp_dev->per_vq_vectors) msix_vec = allocated_vectors++; else msix_vec = VP_MSIX_VQ_VECTOR; vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); goto error_find; } if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; /* allocate per-vq irq if available and necessary */ snprintf(vp_dev->msix_names[msix_vec], sizeof *vp_dev->msix_names, "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(vp_dev->msix_entries[msix_vec].vector, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vqs[i]); if (err) { vp_del_vq(vqs[i]); goto error_find; } } return 0; error_find: vp_del_vqs(vdev); error_request: return err; } /* the config->find_vqs() implementation */ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { int err; /* Try MSI-X with one vector per queue. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); if (!err) return 0; /* Fallback: MSI-X with one vector for config, one shared for queues. */ err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, false); if (!err) return 0; /* Finally fall back to regular interrupts. */ return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, false, false); } static const char *vp_bus_name(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); return pci_name(vp_dev->pci_dev); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) /* Setup the affinity for a virtqueue: * - force the affinity for per vq vector * - OR over all affinities for shared MSI * - ignore the affinity request if we're using INTX */ static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_vq_info *info = vq->priv; struct cpumask *mask; unsigned int irq; if (!vq->callback) return -EINVAL; if (vp_dev->msix_enabled) { mask = vp_dev->msix_affinity_masks[info->msix_vector]; irq = vp_dev->msix_entries[info->msix_vector].vector; if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } } return 0; } #endif static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, .find_vqs = vp_find_vqs, .del_vqs = vp_del_vqs, .get_features = vp_get_features, .finalize_features = vp_finalize_features, .bus_name = vp_bus_name, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) .set_vq_affinity = vp_set_vq_affinity, #endif }; static void virtio_pci_release_dev(struct device *_d) { /* * No need for a release method as we allocate/free * all devices together with the pci devices. * Provide an empty one to avoid getting a warning from core. */ } #if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) /** * pci_msi_off - disables any msi or msix capabilities * @dev: the PCI device to operate on * * If you want to use msi see pci_enable_msi and friends. * This is a lower level primitive that allows us to disable * msi operation at the device level. 
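 *
 * Added note: this local copy appears to exist because the older kernels
 * targeted here do not make pci_msi_off() available to modules. It simply
 * clears the enable bit in the MSI and MSI-X capability control words so the
 * device starts in a known state before the driver enables MSI-X itself.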
*/ void pci_msi_off(struct pci_dev *dev) { int pos; u16 control; pos = pci_find_capability(dev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); } pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); control &= ~PCI_MSIX_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); } } #endif /* the PCI probing function */ static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { struct virtio_pci_device *vp_dev; int err; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) return -ENODEV; if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", VIRTIO_PCI_ABI_VERSION, pci_dev->revision); return -ENODEV; } /* allocate our structure and fill it out */ vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); if (vp_dev == NULL) return -ENOMEM; vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); /* Disable MSI/MSIX to bring device to a known good state. */ pci_msi_off(pci_dev); /* enable the device */ err = pci_enable_device(pci_dev); if (err) goto out; err = pci_request_regions(pci_dev, "virtio-pci"); if (err) goto out_enable_device; vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); if (vp_dev->ioaddr == NULL) { err = -ENOMEM; goto out_req_regions; } pci_set_drvdata(pci_dev, vp_dev); pci_set_master(pci_dev); /* we use the subsystem vendor/device id as the virtio vendor/device * id. 
this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by * the subsystem ids */ vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) goto out_set_drvdata; return 0; out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); out_req_regions: pci_release_regions(pci_dev); out_enable_device: pci_disable_device(pci_dev); out: kfree(vp_dev); return err; } static void virtio_pci_remove(struct pci_dev *pci_dev) { struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); unregister_virtio_device(&vp_dev->vdev); vp_del_vqs(&vp_dev->vdev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); kfree(vp_dev); } #ifdef CONFIG_PM static int virtio_pci_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = 0; vp_dev->saved_status = vp_get_status(&vp_dev->vdev); if (drv && drv->freeze) ret = drv->freeze(&vp_dev->vdev); if (!ret) pci_disable_device(pci_dev); return ret; } static int virtio_pci_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_driver *drv; int ret; drv = container_of(vp_dev->vdev.dev.driver, struct virtio_driver, driver); ret = pci_enable_device(pci_dev); if (ret) return ret; pci_set_master(pci_dev); vp_finalize_features(&vp_dev->vdev); if (drv && drv->restore) ret = drv->restore(&vp_dev->vdev); /* Finally, tell the device we're all set */ if (!ret) vp_set_status(&vp_dev->vdev, vp_dev->saved_status); return ret; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) static const struct dev_pm_ops virtio_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) }; #else static const struct dev_pm_ops virtio_pci_pm_ops = { .suspend = virtio_pci_freeze, .resume = virtio_pci_restore, .freeze = virtio_pci_freeze, .thaw = virtio_pci_restore, .restore = virtio_pci_restore, .poweroff = virtio_pci_freeze, }; #endif #endif static struct pci_driver virtio_pci_driver = { .name = "virtio-pci", .id_table = virtio_pci_id_table, .probe = virtio_pci_probe, .remove = virtio_pci_remove, #ifdef CONFIG_PM .driver.pm = &virtio_pci_pm_ops, #endif }; static int __init virtio_pci_init(void) { return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); } module_exit(virtio_pci_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/vni_front/virtio_ring.c000066400000000000000000000570751314037446600302610ustar00rootroot00000000000000/* Virtio ring implementation. * * Copyright 2007 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&(_vq)->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ BUG(); \ } while (0) /* Caller is supposed to guarantee no reentry. */ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ panic("%s:in_use = %i\n", \ (_vq)->vq.name, (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ } while (0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) #else #define BAD_RING(_vq, fmt, args...) \ do { \ dev_err(&_vq->vq.vdev->dev, \ "%s:"fmt, (_vq)->vq.name, ##args); \ (_vq)->broken = true; \ } while (0) #define START_USE(vq) #define END_USE(vq) #endif struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ struct vring vring; /* Can we use weak barriers? */ bool weak_barriers; /* Other side has made a mess, don't try any more. */ bool broken; /* Host supports indirect buffers */ bool indirect; /* Host publishes avail event idx */ bool event; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ unsigned int num_added; /* Last used index we've seen. */ u16 last_used_idx; /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; /* Figure out if their kicks are too delayed. */ bool last_add_time_valid; ktime_t last_add_time; #endif /* Tokens for callbacks. */ void *data[]; }; #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, unsigned int *count) { return sg_next(sg); } static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, unsigned int *count) { if (--(*count) == 0) return NULL; return sg + 1; } /* Set up an indirect table of descriptors and add it to the queue. */ static inline int vring_add_indirect(struct vring_virtqueue *vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_sg, unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, gfp_t gfp) { struct vring_desc *desc; unsigned head; struct scatterlist *sg; int i, n; /* * We require lowmem mappings for the descriptors because * otherwise virt_to_phys will give us bogus addresses in the * virtqueue. */ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) return -ENOMEM; /* Transfer entries from the sg lists into the indirect page */ i = 0; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { desc[i].flags = VRING_DESC_F_NEXT; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; desc[i].addr = sg_phys(sg); desc[i].len = sg->length; desc[i].next = i+1; i++; } } BUG_ON(i != total_sg); /* Last one doesn't continue. 
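 *
 * Added note: at this point desc[] holds one contiguous chain, the out_sgs
 * read-only entries followed by the in_sgs device-writable entries, each
 * linked to the next via VRING_DESC_F_NEXT; clearing that flag on the final
 * entry terminates the chain. The table itself is then published through a
 * single ring descriptor marked VRING_DESC_F_INDIRECT below, whose len covers
 * all i entries.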
*/ desc[i-1].flags &= ~VRING_DESC_F_NEXT; desc[i-1].next = 0; /* We're about to use a buffer */ vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; vq->vring.desc[head].addr = virt_to_phys(desc); vq->vring.desc[head].len = i * sizeof(struct vring_desc); /* Update free pointer */ vq->free_head = vq->vring.desc[head].next; return head; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], struct scatterlist *(*next) (struct scatterlist *, unsigned int *), unsigned int total_out, unsigned int total_in, unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; unsigned int i, n, avail, uninitialized_var(prev), total_sg; int head; START_USE(vq); BUG_ON(data == NULL); #ifdef DEBUG { ktime_t now = ktime_get(); /* No kick or get, with .1 second between? Warn. */ if (vq->last_add_time_valid) WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) > 100); vq->last_add_time = now; vq->last_add_time_valid = true; } #endif total_sg = total_in + total_out; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 2 && vq->vq.num_free) { head = vring_add_indirect(vq, sgs, next, total_sg, total_out, total_in, out_sgs, in_sgs, gfp); if (likely(head >= 0)) goto add_head; } BUG_ON(total_sg > vq->vring.num); BUG_ON(total_sg == 0); if (vq->vq.num_free < total_sg) { pr_debug("Can't add buf len %i - avail = %i\n", total_sg, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ if (out_sgs) vq->notify(&vq->vq); END_USE(vq); return -ENOSPC; } /* We're about to use some buffers from the free list. */ vq->vq.num_free -= total_sg; head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; vq->vring.desc[i].addr = sg_phys(sg); vq->vring.desc[i].len = sg->length; prev = i; i = vq->vring.desc[i].next; } } /* Last one doesn't continue. */ vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ vq->free_head = i; add_head: /* Set token. */ vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); vq->vring.avail->idx++; vq->num_added++; /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return 0; } /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. * @sg: the description of the buffer(s). * @out_num: the number of sg readable by other side * @in_num: the number of sg which are writable (after readable ones) * @data: the token identifying the buffer. 
* @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *data, gfp_t gfp) { struct scatterlist *sgs[2]; sgs[0] = sg; sgs[1] = sg + out; return virtqueue_add(_vq, sgs, sg_next_arr, out, in, out ? 1 : 0, in ? 1 : 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_buf); /** * virtqueue_add_sgs - expose buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of terminated scatterlists. * @out_num: the number of scatterlists readable by other side * @in_num: the number of scatterlists which are writable (after readable ones) * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], unsigned int out_sgs, unsigned int in_sgs, void *data, gfp_t gfp) { unsigned int i, total_out, total_in; /* Count them first. */ for (i = total_out = total_in = 0; i < out_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_out++; } for (; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) total_in++; } return virtqueue_add(_vq, sgs, sg_next_chained, total_out, total_in, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sgs: array of scatterlists (need not be terminated!) * @num: the number of scatterlists writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM). */ int virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist sg[], unsigned int num, void *data, gfp_t gfp) { return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); /** * virtqueue_kick_prepare - first half of split virtqueue_kick call. * @vq: the struct virtqueue * * Instead of virtqueue_kick(), you can do: * if (virtqueue_kick_prepare(vq)) * virtqueue_notify(vq); * * This is sometimes useful because the virtqueue_kick_prepare() needs * to be serialized, but the actual virtqueue_notify() call does not. 
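 *
 * A minimal usage sketch of why the split matters (illustrative only: "vq",
 * "sg", the token and the lock are assumptions, not names from this file):
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_buf(vq, sg, out, in, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 *
 * The ring updates stay serialized under the lock, while the potentially slow
 * notify (a trap to the host) runs outside it.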
*/ bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; bool needs_kick; START_USE(vq); /* We need to expose available array entries before checking avail * event. */ virtio_mb(vq->weak_barriers); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; vq->num_added = 0; #ifdef DEBUG if (vq->last_add_time_valid) { WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), vq->last_add_time)) > 100); } vq->last_add_time_valid = false; #endif if (vq->event) { needs_kick = vring_need_event(vring_avail_event(&vq->vring), new, old); } else { needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); } END_USE(vq); return needs_kick; } EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); /** * virtqueue_notify - second half of split virtqueue_kick call. * @vq: the struct virtqueue * * This does not need to be serialized. */ void virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); /* Prod other side to tell it about changes. */ vq->notify(_vq); } EXPORT_SYMBOL_GPL(virtqueue_notify); /** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_buf calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ void virtqueue_kick(struct virtqueue *vq) { if (virtqueue_kick_prepare(vq)) virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); static void detach_buf(struct vring_virtqueue *vq, unsigned int head) { unsigned int i; /* Clear data ptr. */ vq->data[head] = NULL; /* Put back on free list: find end */ i = head; /* Free the indirect table */ if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) kfree(phys_to_virt(vq->vring.desc[i].addr)); while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) { return vq->last_used_idx != vq->vring.used->idx; } /** * virtqueue_get_buf - get the next used buffer * @vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * * If the driver wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns NULL if there are no used buffers, or the "data" token * handed to virtqueue_add_buf(). */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; u16 last_used; START_USE(vq); if (unlikely(vq->broken)) { END_USE(vq); return NULL; } if (!more_used(vq)) { pr_debug("No more buffers in queue\n"); END_USE(vq); return NULL; } /* Only get used array entries after they have been exposed by host. */ virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); i = vq->vring.used->ring[last_used].id; *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); return NULL; } if (unlikely(!vq->data[i])) { BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } /* detach_buf clears data, so grab it now. 
*/ ret = vq->data[i]; detach_buf(vq, i); vq->last_used_idx++; /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; virtio_mb(vq->weak_barriers); } #ifdef DEBUG vq->last_add_time_valid = false; #endif END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); /** * virtqueue_disable_cb - disable callbacks * @vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only * useful as an optimization. * * Unlike other operations, this need not be serialized. */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); /** * virtqueue_enable_cb_prepare - restart callbacks after disable_cb * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns current queue state * in an opaque unsigned value. This value should be later tested by * virtqueue_poll, to detect a possible race between the driver checking for * more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 last_used_idx; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; END_USE(vq); return last_used_idx; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); /** * virtqueue_poll - query pending used buffers * @vq: the struct virtqueue we're talking about. * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). * * Returns "true" if there are pending used buffers in the queue. * * This does not need to be serialized. */ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); virtio_mb(vq->weak_barriers); return (u16)last_used_idx != vq->vring.used->idx; } EXPORT_SYMBOL_GPL(virtqueue_poll); /** * virtqueue_enable_cb - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. * * This re-enables callbacks; it returns "false" if there are pending * buffers in the queue, to detect a possible race between the driver * checking for more work, and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb(struct virtqueue *_vq) { unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); /** * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @vq: the struct virtqueue we're talking about. 
* * This re-enables callbacks but hints to the other side to delay * interrupts until most of the available buffers have been processed; * it returns "false" if there are many pending buffers in the queue, * to detect a possible race between the driver checking for more work, * and enabling callbacks. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 bufs; START_USE(vq); /* We optimistically turn back on interrupts, then check if there was * more to do. */ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; virtio_mb(vq->weak_barriers); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } END_USE(vq); return true; } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); /** * virtqueue_detach_unused_buf - detach first unused buffer * @vq: the struct virtqueue we're talking about. * * Returns NULL or the "data" token handed to virtqueue_add_buf(). * This is not valid on an active queue; it is useful only for device * shutdown. */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i; void *buf; START_USE(vq); for (i = 0; i < vq->vring.num; i++) { if (!vq->data[i]) continue; /* detach_buf clears data, so grab it now. */ buf = vq->data[i]; detach_buf(vq, i); vq->vring.avail->idx--; END_USE(vq); return buf; } /* That should have freed everything. */ BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; } EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); irqreturn_t vring_interrupt(int irq, void *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); if (!more_used(vq)) { pr_debug("virtqueue interrupt with no work for %p\n", vq); return IRQ_NONE; } if (unlikely(vq->broken)) return IRQ_HANDLED; pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); if (vq->vq.callback) vq->vq.callback(&vq->vq); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), const char *name) { struct vring_virtqueue *vq; unsigned int i; /* We assume num is a power of 2. */ if (num & (num - 1)) { dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); return NULL; } vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); if (!vq) return NULL; vring_init(&vq->vring, num, pages, vring_align); vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.num_free = num; vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); /* No callback? Tell other side not to bother us. 
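 *
 * Added note: VRING_AVAIL_F_NO_INTERRUPT is only a hint; the other side may
 * still raise interrupts, which is why vring_interrupt() above tolerates being
 * called with no work pending and just returns IRQ_NONE.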
*/ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; vq->data[i] = NULL; } vq->data[i] = NULL; return &vq->vq; } EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); /* Manipulates transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev) { unsigned int i; for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { case VIRTIO_RING_F_INDIRECT_DESC: break; case VIRTIO_RING_F_EVENT_IDX: break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); } } } EXPORT_SYMBOL_GPL(vring_transport_features); /** * virtqueue_get_vring_size - return the size of the virtqueue's vring * @vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. */ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->vring.num; } EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->broken; } EXPORT_SYMBOL_GPL(virtqueue_is_broken); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-balloon/000077500000000000000000000000001314037446600257565ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-balloon/Makefile000066400000000000000000000000561314037446600274170ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-balloon.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-balloon/xen-balloon.c000066400000000000000000000427321314037446600303500ustar00rootroot00000000000000/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) #define GB2PAGE 262144 #define BALLOON_CLASS_NAME "xen_memory" unsigned long old_totalram_pages; unsigned long totalram_bias; struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; static DEFINE_MUTEX(balloon_mutex); static struct sys_device balloon_sysdev; static int register_balloon(struct sys_device *sysdev); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ static DEFINE_SPINLOCK(balloon_lock); static struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() do {} while(0) #define dec_totalhigh_pages() do {} while(0) #endif #define set_phys_to_machine(pfn, mfn) ((void)0) /* List of ballooned pages, threaded through the mem_map array. */ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *work); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ /* #define GFP_BALLOON \ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC) */ #define GFP_BALLOON \ (__GFP_HIGHMEM|__GFP_NOWARN|__GFP_NORETRY|GFP_ATOMIC) static void scrub_page(struct page *page) { #ifdef CONFIG_XEN_SCRUB_PAGES clear_highpage(page); #endif } static inline unsigned long get_num_physpages(void) { int nid; unsigned long phys_pages = 0; for_each_online_node(nid) phys_pages += node_present_pages(nid); return phys_pages; } /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(&page->lru, &ballooned_pages); balloon_stats.balloon_high++; dec_totalhigh_pages(); } else { list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } totalram_pages--; } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
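 *
 * Added note: lowmem pages sit at the head of ballooned_pages and highmem
 * pages at the tail (see balloon_append above), so re-population hands lowmem
 * frames back to the kernel first.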
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = list_entry(ballooned_pages.next, struct page, lru); list_del(&page->lru); if (PageHighMem(page)) { balloon_stats.balloon_high--; inc_totalhigh_pages(); } else balloon_stats.balloon_low--; totalram_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return list_entry(ballooned_pages.next, struct page, lru); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = page->lru.next; if (next == &ballooned_pages) return NULL; return list_entry(next, struct page, lru); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = balloon_stats.target_pages; target = min(target, balloon_stats.current_pages + balloon_stats.balloon_low + balloon_stats.balloon_high); return target; } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); spin_lock_irqsave(&balloon_lock, flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); /* Link back into the page tables if not highmem. */ #ifdef CONFIG_PVM if (!xen_hvm_domain() && pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), mfn_pte(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); __free_page(page); } balloon_stats.current_pages += rc; if (old_totalram_pages + rc < totalram_pages) { printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n", old_totalram_pages*4, totalram_pages*4); balloon_stats.current_pages = totalram_pages + totalram_bias; printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n", totalram_pages*4, balloon_stats.current_pages*4); } old_totalram_pages = totalram_pages; out: spin_unlock_irqrestore(&balloon_lock, flags); return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); scrub_page(page); if (!xen_hvm_domain() && !PageHighMem(page)) { ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), __pte_ma(0), 0); BUG_ON(ret); } } /* Ensure that ballooned highmem pages don't have kmaps. 
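 *
 * Added note (PV case only): a stale kmap of a page being returned would keep
 * a live kernel mapping of a frame the hypervisor is about to reclaim, so
 * unused kmaps are dropped and the TLB flushed before the P2M entries are
 * invalidated below.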
*/ #ifdef CONFIG_PVM kmap_flush_unused(); flush_tlb_all(); #endif spin_lock_irqsave(&balloon_lock, flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); balloon_stats.current_pages -= nr_pages; if(old_totalram_pages < totalram_pages + nr_pages) { printk(KERN_INFO "old_totalram=%luKB, totalram_pages=%luKB\n", old_totalram_pages*4, totalram_pages*4); balloon_stats.current_pages = totalram_pages + totalram_bias; printk(KERN_INFO "when ballooning, the mem online! totalram=%luKB, current=%luKB\n", totalram_pages*4, balloon_stats.current_pages*4); } old_totalram_pages = totalram_pages; spin_unlock_irqrestore(&balloon_lock, flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ static void balloon_process(struct work_struct *work) { int need_sleep = 0; long credit; long total_increase = 0; char buffer[16]; mutex_lock(&balloon_mutex); printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB,totalram_bias=%luKB\n", totalram_pages*4, balloon_stats.current_pages*4, totalram_bias*4); if (totalram_pages > old_totalram_pages) { //TODO:Just know that totalram_pages will increase. total_increase = (totalram_pages - old_totalram_pages) % GB2PAGE; if (totalram_bias > total_increase ) { totalram_bias = totalram_bias - total_increase; } balloon_stats.current_pages = totalram_pages + totalram_bias; old_totalram_pages = totalram_pages; } printk(KERN_INFO "totalram_pages=%luKB, current_pages=%luKB, totalram_bias=%luKB,total_increase=%ld\n", totalram_pages*4, balloon_stats.current_pages*4, totalram_bias*4, total_increase*4); xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "1"); do { credit = current_target() - balloon_stats.current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != balloon_stats.current_pages) { mod_timer(&balloon_timer, jiffies + HZ); sprintf(buffer,"%lu",balloon_stats.current_pages<<(PAGE_SHIFT-10)); xenbus_write(XBT_NIL, "memory", "target", buffer); } xenbus_write(XBT_NIL, "control/uvp", "Balloon_flag", "0"); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ static void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. 
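 *
 * Added note: target_pages is updated with a single word-sized store and the
 * worker re-reads it under balloon_mutex, so a racing writer at worst causes
 * one extra pass of balloon_process(). Callers pass the target in pages;
 * watch_target() below converts the xenstore "memory/target" value from KiB
 * with ">> (PAGE_SHIFT - 10)". Worked example, assuming 4 KiB pages: a target
 * of 1048576 KiB (1 GiB) becomes 1048576 >> 2 = 262144 pages, matching the
 * GB2PAGE constant defined above.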
*/ balloon_stats.target_pages = target; schedule_work(&balloon_worker); } static struct xenbus_watch target_watch = { .node = "memory/target" }; /* React to a change in the target key */ static void watch_target(struct xenbus_watch *watch, const char **vec, unsigned int len) { unsigned long long new_target; int err; err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); if (err != 1) { /* This is ok (for domain0 at least) - so just return */ return; } /* The given memory/target value is in KiB, so it needs converting to * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. */ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); } static int balloon_init_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int err; err = register_xenbus_watch(&target_watch); if (err) printk(KERN_ERR "Failed to set balloon watcher\n"); return NOTIFY_DONE; } static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { unsigned long pfn,num_physpages,max_pfn; struct page *page; if (!xen_domain()) return -ENODEV; pr_info("xen_balloon: Initialising balloon driver.\n"); num_physpages = get_num_physpages(); if (xen_pv_domain()) max_pfn = xen_start_info->nr_pages; else max_pfn = num_physpages; balloon_stats.current_pages = min(num_physpages,max_pfn); totalram_bias = balloon_stats.current_pages - totalram_pages; old_totalram_pages = totalram_pages; balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; balloon_stats.driver_pages = 0UL; pr_info("current_pages=%luKB, totalram_pages=%luKB, totalram_bias=%luKB\n",balloon_stats.current_pages*4, totalram_pages*4, totalram_bias*4); init_timer(&balloon_timer); balloon_timer.data = 0; balloon_timer.function = balloon_alarm; register_balloon(&balloon_sysdev); /* Initialise the balloon with excess memory space. */ #ifdef CONFIG_PVM for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { page = pfn_to_page(pfn); if (!PageReserved(page)) balloon_append(page); } #endif target_watch.callback = watch_target; xenstore_notifier.notifier_call = balloon_init_watcher; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(balloon_init); static void balloon_exit(void) { /* XXX - release balloon here */ return; } module_exit(balloon_exit); #define BALLOON_SHOW(name, format, args...) 
\ static ssize_t show_##name(struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ return sprintf(buf, format, ##args); \ } \ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); } static ssize_t store_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; target_bytes = simple_strtoull(buf, &endchar, 0) * 1024; balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, show_target_kb, store_target_kb); static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { return sprintf(buf, "%llu\n", (unsigned long long)balloon_stats.target_pages << PAGE_SHIFT); } static ssize_t store_target(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { char *endchar; unsigned long long target_bytes; if (!capable(CAP_SYS_ADMIN)) return -EPERM; target_bytes = memparse(buf, &endchar); balloon_set_new_target(target_bytes >> PAGE_SHIFT); return count; } static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, show_target, store_target); static struct sysdev_attribute *balloon_attrs[] = { &attr_target_kb, &attr_target, }; static struct attribute *balloon_info_attrs[] = { &attr_current_kb.attr, &attr_low_kb.attr, &attr_high_kb.attr, &attr_driver_kb.attr, NULL }; static struct attribute_group balloon_info_group = { .name = "info", .attrs = balloon_info_attrs, }; static struct sysdev_class balloon_sysdev_class = { .name = BALLOON_CLASS_NAME, }; static int register_balloon(struct sys_device *sysdev) { int i, error; error = sysdev_class_register(&balloon_sysdev_class); if (error) return error; sysdev->id = 0; sysdev->cls = &balloon_sysdev_class; error = sysdev_register(sysdev); if (error) { sysdev_class_unregister(&balloon_sysdev_class); return error; } for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { error = sysdev_create_file(sysdev, balloon_attrs[i]); if (error) goto fail; } error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); if (error) goto fail; return 0; fail: while (--i >= 0) sysdev_remove_file(sysdev, balloon_attrs[i]); sysdev_unregister(sysdev); sysdev_class_unregister(&balloon_sysdev_class); return error; } MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-procfs/000077500000000000000000000000001314037446600256245ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-procfs/Makefile000066400000000000000000000000551314037446600272640ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-procfs.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-procfs/xen-procfs.c000066400000000000000000000232571314037446600300650ustar00rootroot00000000000000/* * create_xenbus_proc.c * * Driver giving user-space access to the kernel's xenbus connection * to xenstore. 
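 *
 * Added note: the /proc/xen/xenbus file speaks the same wire format as
 * xenstored. Userspace writes a struct xsd_sockmsg header (type, req_id,
 * tx_id, len) followed by len bytes of payload, and reads back replies and
 * watch events framed the same way; guest-side xenstore tools typically use
 * this entry when no /dev/xen/xenbus node is available.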
* * Copyright (c) 2005, Christian Limpach * Copyright (c) 2005, Rusty Russell, IBM Corporation * Copyright (c) 2012, Huawei Technologies Co., Ltd */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef UBUNTU_KERNEL_3110 #include "internal.h" #endif MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xenbus Process filesystem"); struct xenbus_dev_transaction { struct list_head list; struct xenbus_transaction handle; }; struct read_buffer { struct list_head list; unsigned int cons; unsigned int len; char msg[]; }; struct xenbus_dev_data { /* In-progress transaction. */ struct list_head transactions; /* Active watches. */ struct list_head watches; /* Partial request. */ unsigned int len; union { struct xsd_sockmsg msg; char buffer[PAGE_SIZE]; } u; /* Response queue. */ struct list_head read_buffers; wait_queue_head_t read_waitq; struct mutex reply_mutex; }; static struct proc_dir_entry *proc_entry; static ssize_t xenbus_dev_read(struct file *filp, char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct read_buffer *rb; int i, ret; mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) return ret; mutex_lock(&u->reply_mutex); } rb = list_entry(u->read_buffers.next, struct read_buffer, list); for (i = 0; i < len;) { put_user(rb->msg[rb->cons], ubuf + i); i++; rb->cons++; if (rb->cons == rb->len) { list_del(&rb->list); kfree(rb); if (list_empty(&u->read_buffers)) break; rb = list_entry(u->read_buffers.next, struct read_buffer, list); } } mutex_unlock(&u->reply_mutex); return i; } static void queue_reply(struct xenbus_dev_data *u, char *data, unsigned int len) { struct read_buffer *rb =NULL; if (len == 0) return; rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); if(NULL == rb) BUG_ON(rb == NULL); rb->cons = 0; rb->len = len; memcpy(rb->msg, data, len); list_add_tail(&rb->list, &u->read_buffers); wake_up(&u->read_waitq); } struct watch_adapter { struct list_head list; struct xenbus_watch watch; struct xenbus_dev_data *dev_data; char *token; }; static void free_watch_adapter (struct watch_adapter *watch) { kfree(watch->watch.node); kfree(watch->token); kfree(watch); } static void watch_fired(struct xenbus_watch *watch, const char **vec, unsigned int len) { struct watch_adapter *adap = container_of(watch, struct watch_adapter, watch); struct xsd_sockmsg hdr; const char *path, *token; int path_len, tok_len, body_len, data_len = 0; path = vec[XS_WATCH_PATH]; token = adap->token; path_len = strlen(path) + 1; tok_len = strlen(token) + 1; if (len > 2) data_len = vec[len] - vec[2] + 1; body_len = path_len + tok_len + data_len; hdr.type = XS_WATCH_EVENT; hdr.len = body_len; mutex_lock(&adap->dev_data->reply_mutex); queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr)); queue_reply(adap->dev_data, (char *)path, path_len); queue_reply(adap->dev_data, (char *)token, tok_len); if (len > 2) queue_reply(adap->dev_data, (char *)vec[2], data_len); mutex_unlock(&adap->dev_data->reply_mutex); } static ssize_t xenbus_dev_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *ppos) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans = NULL; uint32_t msg_type; void *reply; char *path, *token; struct watch_adapter *watch, *tmp_watch; int err, rc = len; if ((len + u->len) > sizeof(u->u.buffer)) { rc = -EINVAL; goto out; } if 
(copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) { rc = -EFAULT; goto out; } u->len += len; if ((u->len < sizeof(u->u.msg)) || (u->len < (sizeof(u->u.msg) + u->u.msg.len))) return rc; msg_type = u->u.msg.type; switch (msg_type) { case XS_TRANSACTION_START: case XS_TRANSACTION_END: case XS_DIRECTORY: case XS_READ: case XS_GET_PERMS: case XS_RELEASE: case XS_GET_DOMAIN_PATH: case XS_WRITE: case XS_LINUX_WEAK_WRITE: case XS_MKDIR: case XS_RM: case XS_SET_PERMS: if (msg_type == XS_TRANSACTION_START) { trans = kmalloc(sizeof(*trans), GFP_KERNEL); if (!trans) { rc = -ENOMEM; goto out; } } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { kfree(trans); rc = PTR_ERR(reply); goto out; } if (msg_type == XS_TRANSACTION_START) { trans->handle.id = simple_strtoul(reply, NULL, 0); list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { list_for_each_entry(trans, &u->transactions, list) if (trans->handle.id == u->u.msg.tx_id) break; BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); kfree(trans); } mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg)); queue_reply(u, (char *)reply, u->u.msg.len); mutex_unlock(&u->reply_mutex); kfree(reply); break; case XS_WATCH: case XS_UNWATCH: { static const char *XS_RESP = "OK"; struct xsd_sockmsg hdr; path = u->u.buffer + sizeof(u->u.msg); token = memchr(path, 0, u->u.msg.len); if (token == NULL) { rc = -EILSEQ; goto out; } token++; if (msg_type == XS_WATCH) { watch = kzalloc(sizeof(*watch), GFP_KERNEL); BUG_ON(watch == NULL); watch->watch.node = kmalloc(strlen(path)+1, GFP_KERNEL); BUG_ON(watch->watch.node == NULL); strcpy((char *)watch->watch.node, path); watch->watch.callback = watch_fired; watch->token = kmalloc(strlen(token)+1, GFP_KERNEL); BUG_ON(watch->token == NULL); strcpy(watch->token, token); watch->dev_data = u; err = register_xenbus_watch(&watch->watch); if (err) { free_watch_adapter(watch); rc = err; goto out; } list_add(&watch->list, &u->watches); } else { list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { if (!strcmp(watch->token, token) && !strcmp(watch->watch.node, path)) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); break; } } } hdr.type = msg_type; hdr.len = strlen(XS_RESP) + 1; mutex_lock(&u->reply_mutex); queue_reply(u, (char *)&hdr, sizeof(hdr)); queue_reply(u, (char *)XS_RESP, hdr.len); mutex_unlock(&u->reply_mutex); break; } default: rc = -EINVAL; break; } out: u->len = 0; return rc; } extern int xen_store_evtchn; static int xenbus_dev_open(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u; if (xen_store_evtchn == 0) return -ENOENT; nonseekable_open(inode, filp); u = kzalloc(sizeof(*u), GFP_KERNEL); if (u == NULL) return -ENOMEM; INIT_LIST_HEAD(&u->transactions); INIT_LIST_HEAD(&u->watches); INIT_LIST_HEAD(&u->read_buffers); init_waitqueue_head(&u->read_waitq); mutex_init(&u->reply_mutex); filp->private_data = u; return 0; } static int xenbus_dev_release(struct inode *inode, struct file *filp) { struct xenbus_dev_data *u = filp->private_data; struct xenbus_dev_transaction *trans, *tmp; struct watch_adapter *watch, *tmp_watch; list_for_each_entry_safe(trans, tmp, &u->transactions, list) { xenbus_transaction_end(trans->handle, 1); list_del(&trans->list); kfree(trans); } list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { unregister_xenbus_watch(&watch->watch); list_del(&watch->list); free_watch_adapter(watch); } kfree(u); return 0; } static unsigned int 
xenbus_dev_poll(struct file *file, poll_table *wait) { struct xenbus_dev_data *u = file->private_data; poll_wait(file, &u->read_waitq, wait); if (!list_empty(&u->read_buffers)) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations xenbus_dev_file_ops = { .read = xenbus_dev_read, .write = xenbus_dev_write, .open = xenbus_dev_open, .release = xenbus_dev_release, .poll = xenbus_dev_poll, }; static int xenbus_procfs_init(void) { int ret = 0; #if defined UBUNTU_KERNEL_3110 proc_entry = proc_create( "xen/xenbus", 0644, NULL,&xenbus_dev_file_ops); #else proc_entry = create_proc_entry( "xen/xenbus", 0644, NULL ); #endif if ( proc_entry == NULL ) { ret = -ENOMEM; printk( KERN_INFO "Procfs: Couldn't create xenbus user-space entry!\n"); } else { proc_entry->proc_fops = &xenbus_dev_file_ops; printk( KERN_INFO "Procfs: xenbus user-space has been created.\n" ); } #if defined UBUNTU_KERNEL_3110 proc_entry = proc_create( "xen/version", 0644, NULL,&xenbus_dev_file_ops); #else #if defined GENTOO_KERNEL_390 proc_entry = create_proc_entry( "xen_version", 0644, NULL ); #else proc_entry = create_proc_entry( "xen/version", 0644, NULL ); #endif #endif if ( proc_entry == NULL ) { ret = -ENOMEM; printk( KERN_INFO "Procfs: Couldn't create xenbus user-space entry!\n"); } else { //proc_entry->proc_fops = &xenbus_version_file_ops; printk( KERN_INFO "Procfs: xenbus version has been created.\n" ); } return ret; } module_init(xenbus_procfs_init); static void xenbus_procfs_exit(void) { remove_proc_entry( "xen/xenbus", NULL ); remove_proc_entry( "xen/version", NULL ); printk( KERN_INFO "Procfs: xenbus user-space has been deleted.\n" ); } module_exit(xenbus_procfs_exit); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-scsi/000077500000000000000000000000001314037446600252715ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-scsi/Makefile000066400000000000000000000001351314037446600267300ustar00rootroot00000000000000include $(M)/config.mk obj-m := xen-scsifront.o xen-scsifront-objs := scsifront.o xenbus.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-scsi/common.h000066400000000000000000000112231314037446600267310ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #ifndef __XEN_DRIVERS_SCSIFRONT_H__ #define __XEN_DRIVERS_SCSIFRONT_H__ #include #include #include #include #include #include #include #include #include #include #include /* ssleep prototype */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*#include */ #ifdef HAVE_XEN_PLATFORM_COMPAT_H #include #endif #define GRANT_INVALID_REF 0 #define VSCSI_IN_ABORT 1 #define VSCSI_IN_RESET 2 /* tuning point*/ #define VSCSIIF_DEFAULT_CMD_PER_LUN 64 #define VSCSIIF_MAX_TARGET 64 #define VSCSIIF_MAX_LUN 255 #define RING_REF_PAGE_COUNT 8 /* IOҳ */ #define VSCSIIF_RING_PAGE_SIZE (RING_REF_PAGE_COUNT * PAGE_SIZE) /* IOռڴС */ #define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, VSCSIIF_RING_PAGE_SIZE) #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE struct vscsifrnt_shadow { uint16_t next_free; /* command between backend and frontend * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */ unsigned char act; /* do reset function */ wait_queue_head_t wq_reset; /* reset work queue */ int wait_reset; /* reset work queue condition */ int32_t rslt_reset; /* reset response status */ /* (SUCESS or FAILED) */ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3) requests */ unsigned int sc_data_direction; /* Number of pieces of scatter-gather */ unsigned int nr_segments; /* requested struct scsi_cmnd is stored from kernel */ unsigned long req_scsi_cmnd; int gref[VSCSIIF_SG_TABLESIZE]; }; struct vscsifrnt_info { struct xenbus_device *dev; struct Scsi_Host *host; spinlock_t io_lock; spinlock_t shadow_lock; unsigned int evtchn; unsigned int irq; grant_ref_t ring_ref[RING_REF_PAGE_COUNT]; struct vscsiif_front_ring ring; struct vscsiif_response ring_res; struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS]; uint32_t shadow_free; struct task_struct *kthread; wait_queue_head_t wq; unsigned int waiting_resp; int connected; }; #define DPRINTK(_f, _a...) 
\ pr_debug("(file=%s, line=%d) " _f, \ __FILE__ , __LINE__ , ## _a ) int scsifront_xenbus_init(void); void scsifront_xenbus_unregister(void); int scsifront_schedule(void *data); irqreturn_t scsifront_intr(int irq, void *dev_id); int scsifront_cmd_done(struct vscsifrnt_info *info); extern int scsi_internal_device_block(struct scsi_device *sdev); extern int scsi_internal_device_unblock(struct scsi_device *sdev); void scsifront_resume_io(struct vscsifrnt_info *info); void scsifront_resume_free(struct vscsifrnt_info *info); void scsifront_recover(struct vscsifrnt_info *info); void scsifront_xenfree(struct vscsifrnt_info *info); #endif /* __XEN_DRIVERS_SCSIFRONT_H__ */ UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-scsi/scsifront.c000066400000000000000000000421661314037446600274600ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include "common.h" #define VSCSIIF_STATE_CONNECTED 1 /* hostѳʼresumeĹqueuecommandresetԹ */ #define VSCSIIF_STATE_SUSPENDED 2 /* ڶsuspendʱIOresumequeuecommandresetӦ */ /* next_freeΪ0x0fffһʹõģһshadow_idʼΪ0x0fffǿܲûʹ */ #define is_shadow_item_in_use(shadow_item) \ ((shadow_item.next_free == 0x0fff) && (shadow_item.req_scsi_cmnd != 0)) #define is_shadow_item_cmnd_cdb(shadow_item) (shadow_item.act == VSCSIIF_ACT_SCSI_CDB) #define is_reset_cmnd_and_completing(shadow_item) \ ((shadow_item.act == VSCSIIF_ACT_SCSI_RESET) && (shadow_item.wait_reset == 1)) static int get_id_from_freelist(struct vscsifrnt_info *info) { unsigned long flags; uint32_t free; spin_lock_irqsave(&info->shadow_lock, flags); free = info->shadow_free; BUG_ON(free > VSCSIIF_MAX_REQS); info->shadow_free = info->shadow[free].next_free; info->shadow[free].next_free = 0x0fff; info->shadow[free].wait_reset = 0; spin_unlock_irqrestore(&info->shadow_lock, flags); return free; } static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id) { unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].next_free = info->shadow_free; info->shadow[id].req_scsi_cmnd = 0; info->shadow_free = id; spin_unlock_irqrestore(&info->shadow_lock, flags); } static struct vscsiif_request* io_ring_get_request(struct vscsiif_front_ring *ring) { vscsiif_request_t *ring_req; ring_req = RING_GET_REQUEST(ring, ring->req_prod_pvt); ring->req_prod_pvt++; return ring_req; } static struct vscsiif_request* scsifront_pre_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); vscsiif_request_t *ring_req; uint32_t id; ring_req = io_ring_get_request(ring); id = get_id_from_freelist(info); /* use id by response */ ring_req->rqid = (uint16_t)id; return ring_req; } static void scsifront_notify_work(struct vscsifrnt_info *info) { info->waiting_resp = 1; wake_up(&info->wq); } static void scsifront_do_request(struct vscsifrnt_info *info) { struct vscsiif_front_ring *ring = &(info->ring); unsigned int irq = info->irq; int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); if (notify) notify_remote_via_irq(irq); } irqreturn_t scsifront_intr(int irq, void *dev_id) { scsifront_notify_work((struct vscsifrnt_info *)dev_id); return IRQ_HANDLED; } static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id) { int i; if (s->sc_data_direction == DMA_NONE) return; if (s->nr_segments) { for (i = 0; i < s->nr_segments; i++) { if (unlikely(gnttab_query_foreign_access( s->gref[i]) != 0)) { printk(KERN_ALERT "scsifront: " "grant still in use by backend.\n"); BUG(); } gnttab_end_foreign_access(s->gref[i], 0, 0UL); } } return; } static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { struct scsi_cmnd *sc; uint32_t id; uint8_t sense_len; id = ring_res->rqid; sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd; if (sc == NULL) BUG(); scsifront_gnttab_done(&info->shadow[id], id); add_id_to_freelist(info, id); sc->result = ring_res->rslt; scsi_set_resid(sc, ring_res->residual_len); if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE) sense_len = VSCSIIF_SENSE_BUFFERSIZE; else sense_len = ring_res->sense_len; if (sense_len) memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len); sc->scsi_done(sc); return; } static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, vscsiif_response_t *ring_res) { uint16_t id = ring_res->rqid; unsigned long flags; spin_lock_irqsave(&info->shadow_lock, flags); info->shadow[id].wait_reset = 1; 
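/* A reset completion is recorded under shadow_lock: rslt_reset below carries
 * the backend's status and wait_reset marks the request as finished, so the
 * caller blocked on shadow[id].wq_reset in scsifront_dev_reset_handler() can
 * be woken once the lock is dropped. */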
info->shadow[id].rslt_reset = ring_res->rslt; spin_unlock_irqrestore(&info->shadow_lock, flags); wake_up(&(info->shadow[id].wq_reset)); } int scsifront_cmd_done(struct vscsifrnt_info *info) { vscsiif_response_t *ring_res; RING_IDX i, rp; int more_to_do = 0; unsigned long flags; spin_lock_irqsave(info->host->host_lock, flags); /* ״̬Ϊconnectedresume³ʼIO˴ʱܷIO */ if (unlikely(VSCSIIF_STATE_CONNECTED != info->connected)) { more_to_do = 1; /* ˴δɵresumeʱط */ goto out; } rp = info->ring.sring->rsp_prod; rmb(); for (i = info->ring.rsp_cons; i != rp; i++) { ring_res = RING_GET_RESPONSE(&info->ring, i); if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB) scsifront_cdb_cmd_done(info, ring_res); else scsifront_sync_cmd_done(info, ring_res); } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); } else { info->ring.sring->rsp_event = i + 1; } out: spin_unlock_irqrestore(info->host->host_lock, flags); /* Yield point for this unbounded loop. */ cond_resched(); return more_to_do; } int scsifront_schedule(void *data) { struct vscsifrnt_info *info = (struct vscsifrnt_info *)data; while (!kthread_should_stop()) { wait_event_interruptible( info->wq, info->waiting_resp || kthread_should_stop()); info->waiting_resp = 0; smp_mb(); if (scsifront_cmd_done(info)) info->waiting_resp = 1; } return 0; } void scsifront_resume_io(struct vscsifrnt_info *info) { unsigned long flags; spin_lock_irqsave(info->host->host_lock, flags); /* scsifront_recover()еif (info->connected == VSCSIIF_STATE_CONNECTED)һ */ info->connected = VSCSIIF_STATE_CONNECTED; scsifront_do_request(info); spin_unlock_irqrestore(info->host->host_lock, flags); } static int map_data_for_request(struct vscsifrnt_info *info, struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id, int is_first_time_map) { grant_ref_t gref_head; int err, ref, ref_cnt = 0; int write = (sc->sc_data_direction == DMA_TO_DEVICE); unsigned int i, nr_pages, off, len, bytes; unsigned long buffer_mfn, buffer_pfn; if (sc->sc_data_direction == DMA_NONE) return 0; if (is_first_time_map) { err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head); if (err) { printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n"); return -ENOMEM; } } if (scsi_bufflen(sc)) { /* quoted scsi_lib.c/scsi_req_map_sg . 
*/ struct scatterlist *sg, *sgl = scsi_sglist(sc); unsigned int data_len = scsi_bufflen(sc); nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT; if (nr_pages > VSCSIIF_SG_TABLESIZE) { printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n"); ref_cnt = (-E2BIG); goto big_to_sg; } for_each_sg (sgl, sg, scsi_sg_count(sc), i) { off = sg->offset; len = sg->length; buffer_pfn = page_to_pfn(sg_page(sg)); while (len > 0 && data_len > 0) { /* * sg sends a scatterlist that is larger than * the data_len it wants transferred for certain * IO sizes */ bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); if (is_first_time_map) { ref = gnttab_claim_grant_reference(&gref_head); BUG_ON(ref == -ENOSPC); info->shadow[id].gref[ref_cnt] = ref; } else { ref = info->shadow[id].gref[ref_cnt]; } buffer_mfn = pfn_to_mfn(buffer_pfn); gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id, buffer_mfn, write); ring_req->seg[ref_cnt].gref = ref; ring_req->seg[ref_cnt].offset = (uint16_t)off; ring_req->seg[ref_cnt].length = (uint16_t)bytes; buffer_pfn++; len -= bytes; data_len -= bytes; off = 0; ref_cnt++; } } } big_to_sg: if (is_first_time_map) { gnttab_free_grant_references(gref_head); } return ref_cnt; } static void scsifront_device_block_io(struct Scsi_Host *host) { struct scsi_device *sdev; shost_for_each_device(sdev, host) scsi_internal_device_block(sdev); } static void scsifront_unblock_io_device(struct Scsi_Host *host) { struct scsi_device *sdev; shost_for_each_device(sdev, host) scsi_internal_device_unblock(sdev); } static void scsifront_fill_request(vscsiif_request_t *ring_req, struct scsi_cmnd *sc) { ring_req->id = sc->device->id; ring_req->lun = sc->device->lun; ring_req->channel = sc->device->channel; ring_req->cmd_len = sc->cmd_len; BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); if (sc->cmd_len) memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); else memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; ring_req->timeout_per_command = (sc->request->timeout / HZ); } void scsifront_resume_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); info->connected = VSCSIIF_STATE_SUSPENDED; spin_unlock_irqrestore(host->host_lock, flags); /* kthread(scsifront_schedule->scsifront_cmd_done)ͬһ */ spin_lock_irqsave(&info->io_lock, flags); spin_unlock_irqrestore(&info->io_lock, flags); /* Prevent new requests being issued until we fix things up. */ scsifront_device_block_io(host); scsi_block_requests(host); scsifront_xenfree(info); } static void scsifront_resend_queued(struct vscsifrnt_info *info, int shadow_id) { vscsiif_request_t *ring_req; struct scsi_cmnd *sc; int ref_cnt; /* Grab a request slot and copy shadow state into it. */ ring_req = io_ring_get_request(&(info->ring)); ring_req->rqid = (uint16_t)shadow_id; /* ֱpending requestshadow id */ sc = (struct scsi_cmnd *)info->shadow[shadow_id].req_scsi_cmnd; scsifront_fill_request(ring_req, sc); ring_req->act = VSCSIIF_ACT_SCSI_CDB; /* * Rewrite any grant references invalidated by suspend/resume. 
* Ӧʧܣʧqueue_commandʱӦʧܡref_cntһЧֵ */ ref_cnt = map_data_for_request(info, sc, ring_req, shadow_id, 0); if (info->shadow[shadow_id].nr_segments != ref_cnt) { printk(KERN_ERR "Error: resend queued: nr_segments[%u] ref_cnt[%d]\n", info->shadow[shadow_id].nr_segments, ref_cnt); } ring_req->nr_segments = (uint8_t)ref_cnt; } static void scsifront_resend_reset(struct vscsifrnt_info *info, int reset_shadow_id) { vscsiif_request_t *ring_req; struct scsi_cmnd *sc; /* Grab a request slot and copy shadow state into it. */ ring_req = io_ring_get_request(&(info->ring)); ring_req->rqid = (uint16_t)reset_shadow_id; /* ֱpending requestshadow id */ sc = (struct scsi_cmnd *)info->shadow[reset_shadow_id].req_scsi_cmnd; scsifront_fill_request(ring_req, sc); ring_req->act = VSCSIIF_ACT_SCSI_RESET; ring_req->nr_segments = (uint8_t)0; } void scsifront_recover(struct vscsifrnt_info *info) { int i; unsigned long flags; struct xenbus_device *dev = info->dev; int reset_shadow_id = -1; xenbus_switch_state(dev, XenbusStateInitialised); spin_lock_irqsave(info->host->host_lock, flags); /* Find pending requests and requeue them. */ /* * shadowget_idadd_idʹöhost_lockУûhost_lockеscsifront_cdb_cmd_done()io_lockУ * scsifront_resume_free()Ѻio_lockͬһΣkthreadеscsifront_cdb_cmd_done()ʱִ */ for (i = 0; i < VSCSIIF_MAX_REQS; i++) { /* resetɣqueue commandҪϲգҪ· */ if (is_shadow_item_in_use(info->shadow[i]) && is_reset_cmnd_and_completing(info->shadow[i])) { goto out; } } for (i = 0; i < VSCSIIF_MAX_REQS; i++) { /* Not in use? */ if (!is_shadow_item_in_use(info->shadow[i])) { continue; } if (is_shadow_item_cmnd_cdb(info->shadow[i])) { scsifront_resend_queued(info, i); } else { if (reset_shadow_id != -1) { printk(KERN_ERR "Error: recover: more than one reset! info[%p]\n", info); } reset_shadow_id = i; } } if (reset_shadow_id != -1) { scsifront_resend_reset(info, reset_shadow_id); } out: /* Now safe for us to use the shared ring */ /* scsifront_backend_changed()еcase XenbusStateConnectedһ */ spin_unlock_irqrestore(info->host->host_lock, flags); scsi_unblock_requests(info->host); scsifront_unblock_io_device(info->host); } static int scsifront_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; int ref_cnt; uint16_t rqid; if (unlikely(info->connected != VSCSIIF_STATE_CONNECTED)) goto out_host_busy; if (RING_FULL(&info->ring)) { goto out_host_busy; } sc->scsi_done = done; sc->result = 0; ring_req = scsifront_pre_request(info); rqid = ring_req->rqid; ring_req->act = VSCSIIF_ACT_SCSI_CDB; scsifront_fill_request(ring_req, sc); info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].act = ring_req->act; ref_cnt = map_data_for_request(info, sc, ring_req, rqid, 1); if (ref_cnt < 0) { printk(KERN_ERR "scsifront_queuecommand map failed! 
ref_cnt[%d]\n", ref_cnt); add_id_to_freelist(info, rqid); if (ref_cnt == (-ENOMEM)) goto out_host_busy; else { sc->result = (DID_ERROR << 16); goto out_fail_command; } } ring_req->nr_segments = (uint8_t)ref_cnt; info->shadow[rqid].nr_segments = ref_cnt; scsifront_do_request(info); return 0; out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(sc); return 0; } static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) { return (FAILED); } /* vscsi supports only device_reset, because it is each of LUNs */ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc) { struct Scsi_Host *host = sc->device->host; struct vscsifrnt_info *info = (struct vscsifrnt_info *) sc->device->host->hostdata; vscsiif_request_t *ring_req; uint16_t rqid; int err; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_lock_irq(host->host_lock); #endif retry: if (unlikely(info->connected != VSCSIIF_STATE_CONNECTED)) { spin_unlock_irq(host->host_lock); ssleep(1); spin_lock_irq(host->host_lock); goto retry; } /* µĿԴѼ˴Իļ⣬޸Ĵ϶࣬pvscsiδ */ if (RING_FULL(&info->ring)) printk(KERN_ERR "Error: reset: fing full! info[%p] sc[%p]!\n", info, sc); ring_req = scsifront_pre_request(info); ring_req->act = VSCSIIF_ACT_SCSI_RESET; rqid = ring_req->rqid; info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET; info->shadow[rqid].sc_data_direction = sc->sc_data_direction; info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc; scsifront_fill_request(ring_req, sc); ring_req->nr_segments = 0; info->shadow[rqid].nr_segments = 0; scsifront_do_request(info); spin_unlock_irq(host->host_lock); wait_event_interruptible(info->shadow[rqid].wq_reset, info->shadow[rqid].wait_reset); spin_lock_irq(host->host_lock); err = info->shadow[rqid].rslt_reset; add_id_to_freelist(info, rqid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) spin_unlock_irq(host->host_lock); #endif return (err); } struct scsi_host_template scsifront_sht = { .module = THIS_MODULE, .name = "Xen SCSI frontend driver", .queuecommand = scsifront_queuecommand, .eh_abort_handler = scsifront_eh_abort_handler, .eh_device_reset_handler= scsifront_dev_reset_handler, .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN, .can_queue = VSCSIIF_MAX_REQS, .this_id = -1, .sg_tablesize = VSCSIIF_SG_TABLESIZE, .use_clustering = DISABLE_CLUSTERING, .proc_name = "scsifront", }; static int __init scsifront_init(void) { int err; if (!xen_domain()) return -ENODEV; printk(KERN_INFO "scsifront_init.\n"); err = scsifront_xenbus_init(); return err; } static void __exit scsifront_exit(void) { printk(KERN_INFO "scsifront_exit.\n"); scsifront_xenbus_unregister(); } module_init(scsifront_init); module_exit(scsifront_exit); MODULE_DESCRIPTION("Xen SCSI frontend driver"); MODULE_LICENSE("GPL"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-scsi/xenbus.c000066400000000000000000000274751314037446600267600ustar00rootroot00000000000000/* * Xen SCSI frontend driver * * Copyright (c) 2008, FUJITSU Limited * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or 
sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include "common.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) #define DEFAULT_TASK_COMM_LEN 16 #else #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN #endif #define ADD_LUN_RETRY_NUM 3 extern struct scsi_host_template scsifront_sht; void scsifront_xenfree(struct vscsifrnt_info *info) { int i; for (i = 0; i < RING_REF_PAGE_COUNT; i++) { if (info->ring_ref[i] != GRANT_INVALID_REF) { gnttab_end_foreign_access(info->ring_ref[i], 0, 0UL); info->ring_ref[i] = GRANT_INVALID_REF; } } free_pages((unsigned long)info->ring.sring, get_order(VSCSIIF_RING_PAGE_SIZE)); info->ring.sring = NULL; if (info->irq) unbind_from_irqhandler(info->irq, info); info->irq = 0; } static void scsifront_free(struct vscsifrnt_info *info) { struct Scsi_Host *host = info->host; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) if (host->shost_state != SHOST_DEL) { #else if (!test_bit(SHOST_DEL, &host->shost_state)) { #endif scsi_remove_host(info->host); } scsifront_xenfree(info); scsi_host_put(info->host); } static int scsifront_alloc_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct vscsiif_sring *sring; int err = -ENOMEM; int i; for (i = 0; i < RING_REF_PAGE_COUNT; i++) info->ring_ref[i] = GRANT_INVALID_REF; /***** Frontend to Backend ring start *****/ sring = (struct vscsiif_sring *) __get_free_pages(GFP_KERNEL, get_order(VSCSIIF_RING_PAGE_SIZE)); if (!sring) { xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)"); return err; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, VSCSIIF_RING_PAGE_SIZE); for (i = 0; i < RING_REF_PAGE_COUNT; i++) { err = xenbus_grant_ring(dev, virt_to_mfn((unsigned long)sring + i * PAGE_SIZE)); if (err < 0) { info->ring.sring = NULL; xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)"); goto free_sring; } info->ring_ref[i] = err; } err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) goto free_sring; err = bind_evtchn_to_irqhandler( info->evtchn, scsifront_intr, IRQF_SAMPLE_RANDOM, "scsifront", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler"); goto free_sring; } info->irq = err; return 0; /* free resource */ free_sring: scsifront_free(info); return err; } static int scsifront_init_ring(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct xenbus_transaction xbt; int err; int i; char str[16]; DPRINTK("%s\n",__FUNCTION__); err = scsifront_alloc_ring(info); if (err) return err; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); } for (i = 0; i < RING_REF_PAGE_COUNT; i++) { snprintf(str, sizeof(str), "ring-ref-%d", i + 1); err = xenbus_printf(xbt, dev->nodename, str, "%u", info->ring_ref[i]); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing ring-ref"); 
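/* If writing any ring-ref key fails, the transaction is aborted at the fail
 * label and scsifront_free() tears down what scsifront_alloc_ring() set up:
 * the grant references, the shared ring pages and the event-channel IRQ. */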
goto fail; } } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", info->evtchn); if (err) { xenbus_dev_fatal(dev, err, "%s", "writing event-channel"); goto fail; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto free_sring; } return 0; fail: xenbus_transaction_end(xbt, 1); free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { struct vscsifrnt_info *info; struct Scsi_Host *host; int i, err = -ENOMEM; char name[DEFAULT_TASK_COMM_LEN]; host = scsi_host_alloc(&scsifront_sht, sizeof(*info)); if (!host) { xenbus_dev_fatal(dev, err, "fail to allocate scsi host"); return err; } info = (struct vscsifrnt_info *) host->hostdata; info->host = host; dev_set_drvdata(&dev->dev, info); info->dev = dev; for (i = 0; i < VSCSIIF_MAX_REQS; i++) { info->shadow[i].next_free = i + 1; init_waitqueue_head(&(info->shadow[i].wq_reset)); info->shadow[i].wait_reset = 0; } info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff; err = scsifront_init_ring(info); if (err) { scsi_host_put(host); return err; } init_waitqueue_head(&info->wq); spin_lock_init(&info->io_lock); spin_lock_init(&info->shadow_lock); snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no); info->kthread = kthread_run(scsifront_schedule, info, name); if (IS_ERR(info->kthread)) { err = PTR_ERR(info->kthread); info->kthread = NULL; printk(KERN_ERR "scsifront: kthread start err %d\n", err); goto free_sring; } host->max_id = VSCSIIF_MAX_TARGET; host->max_channel = 0; host->max_lun = VSCSIIF_MAX_LUN; host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512; host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE; err = scsi_add_host(host, &dev->dev); if (err) { printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err); goto free_sring; } xenbus_switch_state(dev, XenbusStateInitialised); return 0; free_sring: /* free resource */ scsifront_free(info); return err; } static int scsifront_resume(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); int err = -ENOMEM; if (gnttab_query_foreign_access(info->ring_ref[0])) { return 0; } /* * scsifront_resume_free()scsifront_init_ring()л³ʼIO/Ȩ * scsifront_recover()лѵǰshadowoutstandingIOطһ飬 * ׶λֱhost; Ϊˣqueuecommandreset_handlerkthreadУҪע׶ͬ */ scsifront_resume_free(info); err = scsifront_init_ring(info); if (!err) { scsifront_recover(info); return err; } return 0; } static int scsifront_remove(struct xenbus_device *dev) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename); if (info->kthread) { kthread_stop(info->kthread); info->kthread = NULL; } scsifront_free(info); return 0; } static int scsifront_disconnect(struct vscsifrnt_info *info) { struct xenbus_device *dev = info->dev; struct Scsi_Host *host = info->host; DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename); /* When this function is executed, all devices of Frontend have been deleted. Therefore, it need not block I/O before remove_host. 
*/ scsi_remove_host(host); xenbus_frontend_closed(dev); return 0; } #define VSCSIFRONT_OP_ADD_LUN 1 #define VSCSIFRONT_OP_DEL_LUN 2 static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op) { struct xenbus_device *dev = info->dev; int i, err = 0; char str[64], state_str[64]; char **dir; unsigned int dir_n = 0; unsigned int device_state; unsigned int hst, chn, tgt, lun; struct scsi_device *sdev; int count = 0; dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); if (IS_ERR(dir)) return; for (i = 0; i < dir_n; i++) { /* read status */ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u", &device_state); if (XENBUS_EXIST_ERR(err)) continue; /* virtual SCSI device */ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]); err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun); if (XENBUS_EXIST_ERR(err)) continue; /* front device state path */ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]); switch (op) { case VSCSIFRONT_OP_ADD_LUN: if (device_state == XenbusStateInitialised) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_ERR "scsifront: Device already in use.\n"); scsi_device_put(sdev); #if 0 (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); #else /* ߻Ѻ豸Ѵں */ (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); #endif } else { add_lun_retry: ++count; scsi_add_device(info->host, chn, tgt, lun); sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { printk(KERN_INFO "scsifront: scsi_add_device ok.\n"); scsi_device_put(sdev); (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateConnected); } else { if (count > ADD_LUN_RETRY_NUM) { break; } printk(KERN_ERR "scsifront: scsi_add_device failed.\n"); goto add_lun_retry; } } } break; case VSCSIFRONT_OP_DEL_LUN: if (device_state == XenbusStateClosing) { sdev = scsi_device_lookup(info->host, chn, tgt, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); (void)xenbus_printf(XBT_NIL, dev->nodename, state_str, "%d", XenbusStateClosed); } } break; default: break; } } kfree(dir); return; } static void scsifront_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); DPRINTK("%p %u %u\n", dev, dev->state, backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateClosed: break; case XenbusStateInitialised: break; case XenbusStateConnected: scsifront_resume_io(info); if (xenbus_read_driver_state(dev->nodename) == XenbusStateInitialised) { scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); } if (dev->state == XenbusStateConnected) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateClosing: scsifront_disconnect(info); break; case XenbusStateReconfiguring: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN); xenbus_switch_state(dev, XenbusStateReconfiguring); break; case XenbusStateReconfigured: scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); xenbus_switch_state(dev, XenbusStateConnected); break; } } static struct xenbus_device_id scsifront_ids[] = { { "vscsi" }, { "" } }; MODULE_ALIAS("xen:vscsi"); static struct xenbus_driver scsifront_driver = { .name = "vscsi", .owner = THIS_MODULE, .ids = scsifront_ids, .probe = scsifront_probe, .remove = scsifront_remove, .resume = 
scsifront_resume, .otherend_changed = scsifront_backend_changed, }; int scsifront_xenbus_init(void) { return xenbus_register_frontend(&scsifront_driver); } void scsifront_xenbus_unregister(void) { xenbus_unregister_driver(&scsifront_driver); } UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vbd/000077500000000000000000000000001314037446600251035ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vbd/Makefile000066400000000000000000000001201314037446600265340ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-blkfront.o # xen-vbd-objs := xen-blkfront.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vbd/xen-blkfront.c000066400000000000000000002052201314037446600276610ustar00rootroot00000000000000/* * blkfront.c * * XenLinux virtual block device driver. * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * Copyright (c) 2004, Christian Limpach * Copyright (c) 2004, Andrew Warfield * Copyright (c) 2005, Christopher Clark * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ORACLE #include #else #include #endif #include #include #include #define UNPLUG_DISK_TMOUT 60 static int sda_is_xvda; module_param(sda_is_xvda, bool, 0); MODULE_PARM_DESC(sda_is_xvda, "sdX in guest config translates to xvdX, not xvd(X+4)"); enum blkif_state { BLKIF_STATE_DISCONNECTED, BLKIF_STATE_CONNECTED, BLKIF_STATE_SUSPENDED, }; struct grant { grant_ref_t gref; unsigned long pfn; struct list_head node; }; struct blk_shadow { struct blkif_request req; struct request *request; struct grant **grants_used; struct grant **indirect_grants; /* If use persistent grant, it will copy response * from grant page to sg when read io completion. 
*/ struct scatterlist *sg; }; struct split_bio { struct bio *bio; atomic_t pending; int err; }; static const struct block_device_operations xlvbd_block_fops; /* * Maximum number of segments in indirect requests, the actual value used by * the frontend driver is the minimum of this value and the value provided * by the backend driver. */ static unsigned int xen_blkif_max_segments = 256; module_param_named(max, xen_blkif_max_segments, int, S_IRUGO); MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 256 (1M))"); //#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) #define BLK_MAX_RING_PAGE_ORDER 4U #define BLK_MAX_RING_PAGES (1U << BLK_MAX_RING_PAGE_ORDER) #define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, \ BLK_MAX_RING_PAGES * PAGE_SIZE) //#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They * hang in private_data off the gendisk structure. We may end up * putting all kinds of interesting stuff here :-) */ struct blkfront_info { struct mutex mutex; struct xenbus_device *xbdev; struct gendisk *gd; int vdevice; blkif_vdev_t handle; enum blkif_state connected; unsigned int ring_size; struct blkif_front_ring ring; /* move sg to blk_shadow struct */ //struct scatterlist *sg; unsigned int evtchn, irq; #if defined(ORACLE) struct tasklet_struct tasklet; #endif struct request_queue *rq; struct work_struct work; struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_MAX_RING_SIZE]; //for hot-migrate struct blk_shadow shadow_copy[BLK_MAX_RING_SIZE]; grant_ref_t ring_refs[BLK_MAX_RING_PAGES]; struct page *ring_pages[BLK_MAX_RING_PAGES]; //for hot-migrate //struct page *ring_pages_copy[BLK_MAX_RING_PAGES]; struct list_head grants; struct list_head indirect_pages; unsigned int persistent_gnts_c; unsigned int feature_persistent:1; unsigned long shadow_free; #if defined(ORACLE) int feature_barrier; #endif unsigned int feature_flush; unsigned int max_indirect_segments; int is_ready; #if defined(ORACLE) spinlock_t io_lock; #endif int users; int blkif_detach; }; static DEFINE_SPINLOCK(blkif_io_lock); static unsigned int nr_minors; static unsigned long *minors; static DEFINE_SPINLOCK(minor_lock); #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) #define GRANT_INVALID_REF 0 #define PARTS_PER_DISK 16 #define PARTS_PER_EXT_DISK 256 #define BLKIF_MAJOR(dev) ((dev)>>8) #define BLKIF_MINOR(dev) ((dev) & 0xff) #define EXT_SHIFT 28 #define EXTENDED (1<shadow_free; BUG_ON(free >= RING_SIZE(&info->ring)); info->shadow_free = info->shadow[free].req.u.rw.id; info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */ return free; } static void add_id_to_freelist(struct blkfront_info *info, unsigned long id) { info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; } static int fill_grant_buffer(struct blkfront_info *info, int num) { struct page *granted_page; struct grant *gnt_list_entry, *n; int i = 0; while(i < num) { gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); if (!gnt_list_entry) goto out_of_memory; /* If frontend uses feature_persistent, it will preallocate * (256 + 1) * 256 pages, almost 257M. 
*/ if (info->feature_persistent) { granted_page = alloc_page(GFP_NOIO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; } gnt_list_entry->pfn = page_to_pfn(granted_page); } gnt_list_entry->gref = GRANT_INVALID_REF; list_add(&gnt_list_entry->node, &info->grants); i++; } return 0; out_of_memory: list_for_each_entry_safe(gnt_list_entry, n, &info->grants, node) { list_del(&gnt_list_entry->node); if (info->feature_persistent) __free_page(pfn_to_page(gnt_list_entry->pfn)); kfree(gnt_list_entry); i--; } BUG_ON(i != 0); return -ENOMEM; } static struct grant *get_grant(grant_ref_t *gref_head, unsigned long pfn, struct blkfront_info *info) { struct grant *gnt_list_entry; unsigned long buffer_mfn; BUG_ON(list_empty(&info->grants)); gnt_list_entry = list_first_entry(&info->grants, struct grant, node); list_del(&gnt_list_entry->node); /* for persistent grant */ if (gnt_list_entry->gref != GRANT_INVALID_REF) { info->persistent_gnts_c--; return gnt_list_entry; } /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); if (!info->feature_persistent) { BUG_ON(!pfn); gnt_list_entry->pfn = pfn; } buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); gnttab_grant_foreign_access_ref(gnt_list_entry->gref, info->xbdev->otherend_id, buffer_mfn, 0); return gnt_list_entry; } static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) { unsigned int end = minor + nr; int rc; if (end > nr_minors) { unsigned long *bitmap, *old; bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), GFP_KERNEL); if (bitmap == NULL) return -ENOMEM; spin_lock(&minor_lock); if (end > nr_minors) { old = minors; memcpy(bitmap, minors, BITS_TO_LONGS(nr_minors) * sizeof(*bitmap)); minors = bitmap; nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG; } else old = bitmap; spin_unlock(&minor_lock); kfree(old); } spin_lock(&minor_lock); if (find_next_bit(minors, end, minor) >= end) { for (; minor < end; ++minor) __set_bit(minor, minors); rc = 0; } else rc = -EBUSY; spin_unlock(&minor_lock); return rc; } static void xlbd_release_minors(unsigned int minor, unsigned int nr) { unsigned int end = minor + nr; BUG_ON(end > nr_minors); spin_lock(&minor_lock); for (; minor < end; ++minor) __clear_bit(minor, minors); spin_unlock(&minor_lock); } static void blkif_restart_queue_callback(void *arg) { struct blkfront_info *info = (struct blkfront_info *)arg; schedule_work(&info->work); } static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) { /* We don't have real geometry info, but let's at least return values consistent with the size of the device */ sector_t nsect = get_capacity(bd->bd_disk); sector_t cylinders = nsect; hg->heads = 0xff; hg->sectors = 0x3f; sector_div(cylinders, hg->heads * hg->sectors); hg->cylinders = cylinders; if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) hg->cylinders = 0xffff; return 0; } static int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument) { struct blkfront_info *info = bdev->bd_disk->private_data; int i; dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", command, (long)argument); switch (command) { case CDROMMULTISESSION: dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; case CDROM_GET_CAPABILITY: { struct gendisk *gd = info->gd; if (gd->flags & GENHD_FL_CD) return 0; return -EINVAL; } 
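/* Only the two CD-ROM ioctls above are serviced for virtual CD devices;
 * every other command falls through to -EINVAL, matching the native Linux
 * behaviour noted in the default branch below. */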
default: /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", command);*/ return -EINVAL; /* same return as native Linux */ } return 0; } /* * blkif_queue_request * * request block io * * id: for guest use only. * operation: BLKIF_OP_{READ,WRITE,PROBE} * buffer: buffer to read/write into. this should be a * virtual address in the guest os. */ static int blkif_queue_request(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; struct blkif_request *ring_req; unsigned long id; unsigned int fsect, lsect; int i, ref, n; struct blkif_request_segment_aligned *segments = NULL; /* * Used to store if we are able to queue the request by just using * existing persistent grants, or if we have to get new grants, * as there are not sufficiently many free. */ bool new_persistent_gnts; grant_ref_t gref_head; struct grant *gnt_list_entry = NULL; struct scatterlist *sg; int nseg, max_grefs; if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) return 1; max_grefs = info->max_indirect_segments ? info->max_indirect_segments + INDIRECT_GREFS(info->max_indirect_segments) : BLKIF_MAX_SEGMENTS_PER_REQUEST; /* Check if we have enought grants to allocate a requests */ if (info->persistent_gnts_c < max_grefs) { new_persistent_gnts = 1; if (gnttab_alloc_grant_references( max_grefs - info->persistent_gnts_c, &gref_head) < 0) { gnttab_request_free_callback( &info->callback, blkif_restart_queue_callback, info, max_grefs); return 1; } } else new_persistent_gnts = 0; /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = get_id_from_freelist(info); info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); BUG_ON(info->max_indirect_segments && req->nr_phys_segments > info->max_indirect_segments); nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); ring_req->u.rw.id = id; if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) { /* * The indirect operation can only be a BLKIF_OP_READ or * BLKIF_OP_WRITE */ #ifndef ORACLE BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA)); #endif ring_req->operation = BLKIF_OP_INDIRECT; ring_req->u.indirect.indirect_op = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.indirect.handle = info->handle; ring_req->u.indirect.nr_segments = nseg; } else { ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.rw.handle = info->handle; ring_req->operation = rq_data_dir(req) ? 
BLKIF_OP_WRITE : BLKIF_OP_READ; if (req->cmd_flags & REQ_HARDBARRIER) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->u.rw.nr_segments = nseg; } for_each_sg(info->shadow[id].sg, sg, nseg, i) { fsect = sg->offset >> 9; lsect = fsect + (sg->length >> 9) - 1; if ((ring_req->operation == BLKIF_OP_INDIRECT) && (i % SEGS_PER_INDIRECT_FRAME == 0)) { unsigned long uninitialized_var(pfn); if (segments) kunmap_atomic(segments, KM_USER0); n = i / SEGS_PER_INDIRECT_FRAME; if (!info->feature_persistent) { struct page *indirect_page; /* Fetch a pre-allocated page to use for indirect grefs */ BUG_ON(list_empty(&info->indirect_pages)); indirect_page = list_first_entry(&info->indirect_pages, struct page, lru); list_del(&indirect_page->lru); pfn = page_to_pfn(indirect_page); } gnt_list_entry = get_grant(&gref_head, pfn, info); info->shadow[id].indirect_grants[n] = gnt_list_entry; segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; } gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); ref = gnt_list_entry->gref; info->shadow[id].grants_used[i] = gnt_list_entry; /* If use persistent grant, it will have a memcpy, * just copy the data from sg page to grant page. */ if (rq_data_dir(req) && info->feature_persistent) { char *bvec_data; void *shared_data; BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); /* * this does not wipe data stored outside the * range sg->offset..sg->offset+sg->length. * Therefore, blkback *could* see data from * previous requests. This is OK as long as * persistent grants are shared with just one * domain. It may need refactoring if this * changes */ memcpy(shared_data + sg->offset, bvec_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } if (ring_req->operation != BLKIF_OP_INDIRECT) { ring_req->u.rw.seg[i] = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } else { n = i % SEGS_PER_INDIRECT_FRAME; segments[n] = (struct blkif_request_segment_aligned) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; } } if (segments) kunmap_atomic(segments, KM_USER0); info->ring.req_prod_pvt++; /* Keep a private copy so we can reissue requests when recovering. 
*/ info->shadow[id].req = *ring_req; /* for persistent grant */ if (new_persistent_gnts) gnttab_free_grant_references(gref_head); return 0; } static inline void flush_requests(struct blkfront_info *info) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); if (notify) notify_remote_via_irq(info->irq); } /* * do_blkif_request * read a block; request is in a request queue */ static void do_blkif_request(struct request_queue *rq) { struct blkfront_info *info = NULL; struct request *req; int queued; pr_debug("Entered do_blkif_request\n"); queued = 0; while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; blk_start_request(req); if (req->cmd_type != REQ_TYPE_FS) { __blk_end_request_all(req, -EIO); continue; } if ( info->blkif_detach != 0) { if( rq_data_dir(req) ){ __blk_end_request_all(req, -ENOSPC); } else{ __blk_end_request_all(req, -EIO); } continue; } pr_debug("do_blk_req %p: cmd %p, sec %lx, " "(%u/%u) buffer:%p [%s]\n", req, req->cmd, (unsigned long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), req->buffer, rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); wait: /* Avoid pointless unplugs. */ blk_stop_queue(rq); break; } queued++; } if (queued != 0) flush_requests(info); } static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, unsigned int segments) { struct request_queue *rq; rq = blk_init_queue(do_blkif_request, &blkif_io_lock); if (rq == NULL) return -1; queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); /* Hard sector size and max sectors impersonate the equiv. hardware. */ blk_queue_logical_block_size(rq, sector_size); blk_queue_max_hw_sectors(rq, segments*8); /* Each segment in a request is up to an aligned page in size. */ blk_queue_segment_boundary(rq, PAGE_SIZE - 1); blk_queue_max_segment_size(rq, PAGE_SIZE); /* Ensure a merged request will fit in a single I/O ring slot. */ #if defined(ORACLE) blk_queue_max_phys_segments(rq, segments); blk_queue_max_hw_segments(rq, segments); #else blk_queue_max_segments(rq, segments); #endif printk("init queue set max_hw_sectors %d max_segments %d\n", segments*8, segments); /* Make sure buffer addresses are sector-aligned. */ blk_queue_dma_alignment(rq, 511); /* Make sure we don't use bounce buffers. */ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); gd->queue = rq; return 0; } #if defined(ORACLE) static int xlvbd_barrier(struct blkfront_info *info) { int err; const char *barrier; switch (info->feature_barrier) { case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; case QUEUE_ORDERED_NONE: barrier = "disabled"; break; default: return -EINVAL; } err = blk_queue_ordered(info->rq, info->feature_barrier, NULL); if (err) return err; printk(KERN_INFO "blkfront: %s: barriers %s\n", info->gd->disk_name, barrier); return 0; } #else static void xlvbd_flush(struct blkfront_info *info) { blk_queue_flush(info->rq, info->feature_flush); printk(KERN_INFO "blkfront: %s: %s %s %s %s %s %s\n", info->gd->disk_name, "barriers", info->feature_flush ? "enabled;" : "disabled;", "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", info->max_indirect_segments ? 
"enabled;" : "disabled;"); } #endif static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) { int major; major = BLKIF_MAJOR(vdevice); *minor = BLKIF_MINOR(vdevice); switch (major) { case XEN_IDE0_MAJOR: *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET; *minor = ((*minor / 64) * PARTS_PER_DISK) + EMULATED_HD_DISK_MINOR_OFFSET; break; case XEN_IDE1_MAJOR: *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET; *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) + EMULATED_HD_DISK_MINOR_OFFSET; break; case XEN_SCSI_DISK0_MAJOR: *offset = (*minor / PARTS_PER_DISK) + emulated_sd_disk_name_offset; *minor = *minor + emulated_sd_disk_minor_offset; break; case XEN_SCSI_DISK1_MAJOR: case XEN_SCSI_DISK2_MAJOR: case XEN_SCSI_DISK3_MAJOR: case XEN_SCSI_DISK4_MAJOR: case XEN_SCSI_DISK5_MAJOR: case XEN_SCSI_DISK6_MAJOR: case XEN_SCSI_DISK7_MAJOR: *offset = (*minor / PARTS_PER_DISK) + ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) + emulated_sd_disk_name_offset; *minor = *minor + ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) + emulated_sd_disk_minor_offset; break; case XEN_SCSI_DISK8_MAJOR: case XEN_SCSI_DISK9_MAJOR: case XEN_SCSI_DISK10_MAJOR: case XEN_SCSI_DISK11_MAJOR: case XEN_SCSI_DISK12_MAJOR: case XEN_SCSI_DISK13_MAJOR: case XEN_SCSI_DISK14_MAJOR: case XEN_SCSI_DISK15_MAJOR: *offset = (*minor / PARTS_PER_DISK) + ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) + emulated_sd_disk_name_offset; *minor = *minor + ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) + emulated_sd_disk_minor_offset; break; case XENVBD_MAJOR: *offset = *minor / PARTS_PER_DISK; break; default: printk(KERN_WARNING "blkfront: your disk configuration is " "incorrect, please use an xvd device instead\n"); return -ENODEV; } return 0; } static int xlvbd_alloc_gendisk(blkif_sector_t capacity, struct blkfront_info *info, u16 vdisk_info, u16 sector_size) { struct gendisk *gd; int nr_minors = 1; int err; unsigned int offset; int minor; int nr_parts; BUG_ON(info->gd != NULL); BUG_ON(info->rq != NULL); if ((info->vdevice>>EXT_SHIFT) > 1) { /* this is above the extended range; something is wrong */ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice); return -ENODEV; } if (!VDEV_IS_EXTENDED(info->vdevice)) { err = xen_translate_vdev(info->vdevice, &minor, &offset); if (err) return err; nr_parts = PARTS_PER_DISK; } else { minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; offset = minor / nr_parts; if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4) printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " "emulated IDE disks,\n\t choose an xvd device name" "from xvde on\n", info->vdevice); } err = -ENODEV; if ((minor % nr_parts) == 0) nr_minors = nr_parts; err = xlbd_reserve_minors(minor, nr_minors); if (err) goto out; err = -ENODEV; gd = alloc_disk(nr_minors); if (gd == NULL) goto release; if (nr_minors > 1) { if (offset < 26) sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset); else sprintf(gd->disk_name, "%s%c%c", DEV_NAME, 'a' + ((offset / 26)-1), 'a' + (offset % 26)); } else { if (offset < 26) sprintf(gd->disk_name, "%s%c%d", DEV_NAME, 'a' + offset, minor & (nr_parts - 1)); else sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME, 'a' + ((offset / 26) - 1), 'a' + (offset % 26), minor & (nr_parts - 1)); } gd->major = XENVBD_MAJOR; gd->first_minor = minor; gd->fops = &xlvbd_block_fops; gd->private_data = info; gd->driverfs_dev = &(info->xbdev->dev); set_capacity(gd, capacity); printk("Set blk queue %d\n", 
info->max_indirect_segments ? :BLKIF_MAX_SEGMENTS_PER_REQUEST); if (xlvbd_init_blk_queue(gd, sector_size, info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST)) { del_gendisk(gd); goto release; } info->rq = gd->queue; info->gd = gd; #if defined(ORACLE) xlvbd_barrier(info); #else xlvbd_flush(info); #endif if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); if (vdisk_info & VDISK_REMOVABLE) gd->flags |= GENHD_FL_REMOVABLE; if (vdisk_info & VDISK_CDROM) gd->flags |= GENHD_FL_CD; return 0; release: xlbd_release_minors(minor, nr_minors); out: return err; } static void xlvbd_release_gendisk(struct blkfront_info *info) { unsigned int minor, nr_minors; unsigned long flags; if (info->rq == NULL) return; spin_lock_irqsave(&blkif_io_lock, flags); /* No more blkif_request(). */ blk_stop_queue(info->rq); /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irqrestore(&blkif_io_lock, flags); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); del_gendisk(info->gd); minor = info->gd->first_minor; nr_minors = info->gd->minors; xlbd_release_minors(minor, nr_minors); blk_cleanup_queue(info->rq); info->rq = NULL; put_disk(info->gd); info->gd = NULL; } static void kick_pending_request_queues(struct blkfront_info *info) { if (!RING_FULL(&info->ring)) { /* Re-enable calldowns. */ blk_start_queue(info->rq); /* Kick things off immediately. */ do_blkif_request(info->rq); } } static void blkif_restart_queue(struct work_struct *work) { struct blkfront_info *info = container_of(work, struct blkfront_info, work); spin_lock_irq(&blkif_io_lock); if (info->connected == BLKIF_STATE_CONNECTED) kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); } static void blkif_free(struct blkfront_info *info, int suspend) { struct grant *persistent_gnt; struct grant *n; int i, j, segs; /* Prevent new requests being issued until we fix things up. */ spin_lock_irq(&blkif_io_lock); info->connected = suspend ? BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; /* No more blkif_request(). 
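* blk_stop_queue() below keeps the block layer from pushing new requests at the shared ring while it is being torn down.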
*/ if (info->rq) blk_stop_queue(info->rq); if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "persistent_gnt->pfn need not been free in suspend mode!\n"); /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); info->persistent_gnts_c--; } } } BUG_ON(info->persistent_gnts_c != 0); } else { /* Remove all persistent grants */ if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { list_del(&persistent_gnt->node); if (persistent_gnt->gref != GRANT_INVALID_REF) { gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); info->persistent_gnts_c--; } if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } } BUG_ON(info->persistent_gnts_c != 0); } /* * Remove indirect pages, this only happens when using indirect * descriptors but not persistent grants */ if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "indirect_page need not been free in suspend mode!\n"); } else { if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } } if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "list_add_tail persistent_gnt in suspend mode!\n"); for (i = 0; i < RING_SIZE(&info->ring); i++) { if (!info->shadow[i].request) continue; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; /*256 indirect_page*/ for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); list_add_tail(&persistent_gnt->node, &info->grants); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) continue; /*1 indirect_page_segment*/ for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); /*indirect_page is taken from info->indirect_pages when feature_persistent is false indirect_page is taken from info->grants when feature_persistent is true */ if (!info->feature_persistent) { struct page *indirect_page; indirect_page = pfn_to_page(persistent_gnt->pfn); printk(KERN_INFO "list_add(&indirect_page->lru, &info->indirect_pages) in suspend mode!\n"); list_add(&indirect_page->lru, &info->indirect_pages); } list_add_tail(&persistent_gnt->node, &info->grants); } } } else { for (i = 0; i < RING_SIZE(&info->ring); i++) { /* * Clear persistent grants present in requests already * on the shared ring */ if (!info->shadow[i].request) goto free_shadow; segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? 
info->shadow[i].req.u.indirect.nr_segments : info->shadow[i].req.u.rw.nr_segments; for (j = 0; j < segs; j++) { persistent_gnt = info->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); if (info->feature_persistent) __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT) /* * If this is not an indirect operation don't try to * free indirect segments */ goto free_shadow; for (j = 0; j < INDIRECT_GREFS(segs); j++) { persistent_gnt = info->shadow[i].indirect_grants[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); } free_shadow: kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; } } /* No more gnttab callback work. */ gnttab_cancel_free_callback(&info->callback); spin_unlock_irq(&blkif_io_lock); /* Flush gnttab callback work. Must be done with no locks held. */ flush_scheduled_work(); vunmap(info->ring.sring); info->ring.sring = NULL; /* Free resources associated with old device channel. */ if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "ring_pages need not been free in suspend mode!\n"); for (i = 0; i < info->ring_size; i++) { if (info->ring_refs[i] != GRANT_INVALID_REF) { //gnttab_end_foreign_access free_page /*gnttab_end_foreign_access(info->ring_refs[i], 0, (unsigned long)mfn_to_virt(pfn_to_mfn(page_to_pfn(info->ring_pages[i]))));*/ gnttab_end_foreign_access(info->ring_refs[i], 0, 0UL); info->ring_refs[i] = GRANT_INVALID_REF; } } } else { for (i = 0; i < info->ring_size; i++) { if (info->ring_refs[i] != GRANT_INVALID_REF) { //gnttab_end_foreign_access also frees the page here, so no explicit __free_page(info->ring_pages[i]) is needed gnttab_end_foreign_access(info->ring_refs[i], 0, (unsigned long)mfn_to_virt(pfn_to_mfn(page_to_pfn(info->ring_pages[i])))); info->ring_refs[i] = GRANT_INVALID_REF; } //if(info->ring_pages_copy[i]) // __free_page(info->ring_pages_copy[i]); } } if (info->irq) unbind_from_irqhandler(info->irq, info); info->evtchn = info->irq = 0; } static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { int i = 0; int nseg; /* for persistent grant */ struct scatterlist *sg; char *bvec_data; void *shared_data; nseg = s->req.operation == BLKIF_OP_INDIRECT ? s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments; if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { /* * Copy the data received from the backend into the bvec. * Since bv_offset can be different than 0, and bv_len different * than PAGE_SIZE, we have to keep track of the current offset, * to be sure we are copying the data from the right shared page.
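* Each completed read segment is copied from the persistently granted page back into the bio_vec page, using the offset and length recorded in the shadow's scatterlist.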
*/ for_each_sg(s->sg, sg, nseg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn), KM_USER0); if (in_interrupt()) { if (in_irq()) bvec_data = kmap_atomic(sg_page(sg), KM_IRQ0); else if (in_softirq()) bvec_data = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); else printk(KERN_WARNING "[kmap] in interrupt, but not irq!\n"); } else bvec_data = kmap_atomic(sg_page(sg), KM_USER0); memcpy(bvec_data + sg->offset, shared_data + sg->offset, sg->length); if (in_interrupt()) { if (in_irq()) kunmap_atomic(bvec_data, KM_IRQ0); else if (in_softirq()) kunmap_atomic(bvec_data, KM_SOFTIRQ0); else printk(KERN_WARNING "[kunmap] in interrupt, but not irq!\n"); } else kunmap_atomic(bvec_data, KM_USER0); kunmap_atomic(shared_data, KM_USER0); } } /* Add the persistent grant into the list of free grants */ for (i = 0; i < nseg; i++) { if (gnttab_query_foreign_access(s->grants_used[i]->gref)) { /* * If the grant is still mapped by the backend (the * backend has chosen to make this grant persistent) * we add it at the head of the list, so it will be * reused first. */ if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->grants_used[i]->gref); list_add(&s->grants_used[i]->node, &info->grants); info->persistent_gnts_c++; } else { /* * If the grant is not mapped by the backend we end the * foreign access and add it to the tail of the list, * so it will not be picked again unless we run out of * persistent grants. */ gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL); s->grants_used[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->grants_used[i]->node, &info->grants); } } if (s->req.operation == BLKIF_OP_INDIRECT) { for (i = 0; i < INDIRECT_GREFS(nseg); i++) { if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) { if (!info->feature_persistent) printk(KERN_WARNING "backed has not unmapped grant: %u\n", s->indirect_grants[i]->gref); list_add(&s->indirect_grants[i]->node, &info->grants); info->persistent_gnts_c++; } else { struct page *indirect_page; gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL); /* * Add the used indirect page back to the list of * available pages for indirect grefs. */ if (!info->feature_persistent) { indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); list_add(&indirect_page->lru, &info->indirect_pages); } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } } } } static irqreturn_t blkif_interrupt(int irq, void *dev_id) { struct request *req; struct blkif_response *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int error; spin_lock_irqsave(&blkif_io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } again: rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; req = info->shadow[id].request; blkif_completion(&info->shadow[id], info, bret); add_id_to_freelist(info, id); error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", info->gd->disk_name); error = -EOPNOTSUPP; info->feature_flush = 0; #if defined(ORACLE) xlvbd_barrier(info); #else xlvbd_flush(info); #endif } /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); __blk_end_request_all(req, error); break; default: BUG(); } } info->ring.rsp_cons = i; if (i != info->ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); if (more_to_do) goto again; } else info->ring.sring->rsp_event = i + 1; kick_pending_request_queues(info); spin_unlock_irqrestore(&blkif_io_lock, flags); return IRQ_HANDLED; } static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info) { struct blkif_sring *sring; int err; unsigned int nr; int i; if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "ring_pages need not been realloc_page in suspend mode!\n"); //printk(KERN_INFO "info->ring_pages = info->ring_pages_copy in suspend mode!\n"); for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; //info->ring_pages[nr] = info->ring_pages_copy[nr]; } } else { for (nr = 0; nr < info->ring_size; nr++) { info->ring_refs[nr] = GRANT_INVALID_REF; info->ring_pages[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages[nr]) { break; } } } sring = (nr == info->ring_size) ? vmap(info->ring_pages, nr, VM_MAP, PAGE_KERNEL) : NULL; if (!sring) { while (nr--) __free_page(info->ring_pages[nr]); xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; } SHARED_RING_INIT(sring); FRONT_RING_INIT(&info->ring, sring, (unsigned long)info->ring_size << PAGE_SHIFT); for (i=0; i<info->ring_size; i++) { err = xenbus_grant_ring(dev, pfn_to_mfn(page_to_pfn(info->ring_pages[i]))); if (err < 0) { printk(KERN_ERR "failed to xenbus_grant_ring!\n"); goto fail; } info->ring_refs[i] = err; } err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) goto fail; err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, IRQF_SAMPLE_RANDOM, "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler failed"); goto fail; } info->irq = err; return 0; fail: blkif_free(info, 0); return err; } static void shadow_init(struct blk_shadow *shadow, unsigned int ring_size) { unsigned int i = 0; WARN_ON(!ring_size); while (++i < ring_size) shadow[i - 1].req.u.rw.id = i; shadow[i - 1].req.u.rw.id = 0x0fffffff; } /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) { unsigned int ring_size, ring_order; const char *what = NULL; struct xenbus_transaction xbt; int err; int nr = 0; if (dev->state >= XenbusStateInitialised) return 0; err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-pages", "%u", &ring_size); if (err != 1) ring_size = 0; else if (!ring_size) pr_warn("blkfront: %s: zero max-ring-pages\n", dev->nodename); err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &ring_order); if (err != 1) ring_order = ring_size ?
ilog2(ring_size) : 0; else if (!ring_size) /* nothing */; else if ((ring_size - 1) >> ring_order) pr_warn("blkfront: %s: max-ring-pages (%#x) inconsistent with" " max-ring-page-order (%u)\n", dev->nodename, ring_size, ring_order); else ring_order = ilog2(ring_size); if (ring_order > BLK_MAX_RING_PAGE_ORDER) ring_order = BLK_MAX_RING_PAGE_ORDER; /* * While for larger rings not all pages are actually used, be on the * safe side and set up a full power of two to please as many backends * as possible. */ printk("talk_to_blkback ring_size %d, ring_order %d\n", 1U << ring_order, ring_order); info->ring_size = ring_size = 1U << ring_order; /* Create shared ring, alloc event channel. */ err = setup_blkring(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_blkring; } if (ring_size == 1) { what = "ring-ref"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[0]); if (err) { goto abort_transaction; } } else { unsigned int i; char buf[16]; what = "ring-page-order"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_order); if (err) goto abort_transaction; what = "num-ring-pages"; err = xenbus_printf(xbt, dev->nodename, what, "%u", ring_size); if (err) goto abort_transaction; what = buf; for (i = 0; i < ring_size; i++) { snprintf(buf, sizeof(buf), "ring-ref%u", i); err = xenbus_printf(xbt, dev->nodename, what, "%u", info->ring_refs[i]); if (err) goto abort_transaction; } } what = "event-channel"; err = xenbus_printf(xbt, dev->nodename, what, "%u", info->evtchn); if (err) { goto abort_transaction; } what = "protocol"; err = xenbus_printf(xbt, dev->nodename, what, "%s", XEN_IO_PROTO_ABI_NATIVE); if (err) { goto abort_transaction; } /* for persistent grant */ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1); if (err) dev_warn(&dev->dev, "writing persistent grants feature to xenbus"); err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_blkring; } xenbus_switch_state(dev, XenbusStateInitialised); ring_size = RING_SIZE(&info->ring); switch (info->connected) { case BLKIF_STATE_DISCONNECTED: shadow_init(info->shadow, ring_size); break; case BLKIF_STATE_SUSPENDED: err = blkif_recover(info); if (err) goto out; break; } #if 0 /*realloc_page ring_pages_copy for next hot-migrate after disk has resumed*/ for (nr = 0; nr < info->ring_size; nr++) { info->ring_pages_copy[nr] = alloc_page(GFP_NOIO | __GFP_HIGH); if (!info->ring_pages_copy[nr]) { printk(KERN_ERR "failed to alloc_page ring_pages_copy!\n"); err = -ENOMEM; goto destroy_blkring; } } #endif pr_info("blkfront: %s: ring-pages=%u nr_ents=%u\n", dev->nodename, info->ring_size, ring_size); return 0; abort_transaction: xenbus_transaction_end(xbt, 1); if (what) xenbus_dev_fatal(dev, err, "writing %s", what); destroy_blkring: blkif_free(info, 0); out: return err; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffer for communication with the backend, and * inform the backend of the appropriate details for those. Switch to * Initialised state. */ static int blkfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err, vdevice; struct blkfront_info *info; enum xenbus_state backend_state; /* FIXME: Use dynamic device id if this is not set.
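* The frontend reads "virtual-device" from xenstore and falls back to "virtual-device-ext" for device IDs in the extended range.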
*/ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device", "%i", &vdevice); if (err != 1) { /* go looking in the extended area instead */ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", "%i", &vdevice); if (err != 1) { xenbus_dev_fatal(dev, err, "reading virtual-device"); return err; } } /* * Prevent hooking up IDE if ide-unplug not supported; * Never hook up SCSI devices on pv-on-hvm guest */ if (xen_hvm_domain()) { extern int xen_ide_unplug_unsupported; int major; int cfg = 1; char* type; int len; if (!VDEV_IS_EXTENDED(vdevice)) major = BLKIF_MAJOR(vdevice); else major = XENVBD_MAJOR; switch(major) { case IDE0_MAJOR: case IDE1_MAJOR: case IDE2_MAJOR: case IDE3_MAJOR: case IDE4_MAJOR: case IDE5_MAJOR: case IDE6_MAJOR: case IDE7_MAJOR: case IDE8_MAJOR: case IDE9_MAJOR: if (xen_ide_unplug_unsupported) cfg = 0; break; case SCSI_DISK0_MAJOR: case SCSI_DISK1_MAJOR: case SCSI_DISK2_MAJOR: case SCSI_DISK3_MAJOR: case SCSI_DISK4_MAJOR: case SCSI_DISK5_MAJOR: case SCSI_DISK6_MAJOR: case SCSI_DISK7_MAJOR: case SCSI_DISK8_MAJOR: case SCSI_DISK9_MAJOR: case SCSI_DISK10_MAJOR: case SCSI_DISK11_MAJOR: case SCSI_DISK12_MAJOR: case SCSI_DISK13_MAJOR: case SCSI_DISK14_MAJOR: case SCSI_DISK15_MAJOR: cfg = 0; break; } if (cfg == 0) { printk(KERN_INFO "%s: HVM does not support vbd %d as xen block device\n", __FUNCTION__, vdevice); return -ENODEV; } /* do not create a PV cdrom device if we are an HVM guest */ type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); if (IS_ERR(type)) return -ENODEV; if (strncmp(type, "cdrom", 5) == 0) { kfree(type); return -ENODEV; } kfree(type); } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); return -ENOMEM; } mutex_init(&info->mutex); info->xbdev = dev; info->vdevice = vdevice; INIT_LIST_HEAD(&info->grants); INIT_LIST_HEAD(&info->indirect_pages); /* for persistent grant */ info->persistent_gnts_c = 0; info->connected = BLKIF_STATE_DISCONNECTED; //add by chijc info->blkif_detach = 0; info->users = 0; INIT_WORK(&info->work, blkif_restart_queue); /* Front end dir is a number, which is used as the id. */ info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); backend_state = xenbus_read_driver_state(dev->otherend); /* * XenbusStateInitWait would be the correct state to enter here, * but (at least) blkback considers this a fatal error. */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) return 0; err = talk_to_blkback(dev, info); if (err) { kfree(info); dev_set_drvdata(&dev->dev, NULL); return err; } return 0; } /* * This is a clone of md_trim_bio, used to split a bio into smaller ones */ static void trim_bio(struct bio *bio, int offset, int size) { /* 'bio' is a cloned bio which we need to trim to match * the given offset and size. 
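* Note that offset and size are passed in 512-byte sectors and are shifted left by 9 below before being applied to the byte-based bio fields.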
* This requires adjusting bi_sector, bi_size, and bi_io_vec */ int i; struct bio_vec *bvec; int sofar = 0; size <<= 9; if (offset == 0 && size == bio->bi_size) return; bio->bi_sector += offset; bio->bi_size = size; offset <<= 9; clear_bit(BIO_SEG_VALID, &bio->bi_flags); while (bio->bi_idx < bio->bi_vcnt && bio->bi_io_vec[bio->bi_idx].bv_len <= offset) { /* remove this whole bio_vec */ offset -= bio->bi_io_vec[bio->bi_idx].bv_len; bio->bi_idx++; } if (bio->bi_idx < bio->bi_vcnt) { bio->bi_io_vec[bio->bi_idx].bv_offset += offset; bio->bi_io_vec[bio->bi_idx].bv_len -= offset; } /* avoid any complications with bi_idx being non-zero*/ if (bio->bi_idx) { memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); bio->bi_vcnt -= bio->bi_idx; bio->bi_idx = 0; } /* Make sure vcnt and last bv are not too big */ bio_for_each_segment(bvec, bio, i) { if (sofar + bvec->bv_len > size) bvec->bv_len = size - sofar; if (bvec->bv_len == 0) { bio->bi_vcnt = i; break; } sofar += bvec->bv_len; } } static void split_bio_end(struct bio *bio, int error) { struct split_bio *split_bio = bio->bi_private; if (error) split_bio->err = error; if (atomic_dec_and_test(&split_bio->pending)) { split_bio->bio->bi_phys_segments = 0; bio_endio(split_bio->bio, split_bio->err); kfree(split_bio); } bio_put(bio); } static int blkif_recover(struct blkfront_info *info) { int i; struct request *req, *n; int rc; struct bio *bio, *cloned_bio; struct bio_list bio_list, merge_bio; unsigned int segs, offset; int pending, size; struct split_bio *split_bio; struct list_head requests; memcpy(info->shadow_copy, info->shadow, sizeof(info->shadow)); /* Stage 2: Set up free list. */ //memset(&info->shadow, 0, sizeof(info->shadow)); for(i = 0;i < RING_SIZE(&info->ring);i++) { memset(&info->shadow[i].req,0,sizeof(struct blkif_request)); info->shadow[i].request = NULL; } shadow_init(info->shadow, RING_SIZE(&info->ring)); info->shadow_free = info->ring.req_prod_pvt; rc = blkfront_setup_indirect(info); if (rc) { return rc; } segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; #if defined(ORACLE) blk_queue_max_phys_segments(info->rq, segs); blk_queue_max_hw_segments(info->rq, segs); #else blk_queue_max_segments(info->rq, segs); #endif bio_list_init(&bio_list); INIT_LIST_HEAD(&requests); /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < RING_SIZE(&info->ring); i++) { /* Not in use? */ if (!info->shadow_copy[i].request) continue; merge_bio.head = info->shadow_copy[i].request->bio; merge_bio.tail = info->shadow_copy[i].request->biotail; bio_list_merge(&bio_list, &merge_bio); info->shadow_copy[i].request->bio = NULL; blk_end_request_all(info->shadow_copy[i].request, 0); } /* * Empty the queue, this is important because we might have * requests in the queue with more segments than what we * can handle now. 
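* The pending bios are collected into bio_list and resubmitted below; any bio with more segments than the new limit is split first.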
*/ spin_lock_irq(&blkif_io_lock); while ((req = blk_fetch_request(info->rq)) != NULL) { merge_bio.head = req->bio; merge_bio.tail = req->biotail; bio_list_merge(&bio_list, &merge_bio); req->bio = NULL; #ifndef ORACLE if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) pr_alert("diskcache flush request found!\n"); #endif //__blk_put_request(info->rq, req); __blk_end_request_all(req, 0); } spin_unlock_irq(&blkif_io_lock); xenbus_switch_state(info->xbdev, XenbusStateConnected); spin_lock_irq(&blkif_io_lock); /* Now safe for us to use the shared ring */ info->connected = BLKIF_STATE_CONNECTED; /* Send off requeued requests */ //flush_requests(info); /* Kick any other new requests queued since we resumed */ kick_pending_request_queues(info); list_for_each_entry_safe(req, n, &requests, queuelist) { /* Requeue pending requests (flush or discard) */ list_del_init(&req->queuelist); BUG_ON(req->nr_phys_segments > segs); blk_requeue_request(info->rq, req); } spin_unlock_irq(&blkif_io_lock); while ((bio = bio_list_pop(&bio_list)) != NULL) { /* Traverse the list of pending bios and re-queue them */ if (bio_segments(bio) > segs) { /* * This bio has more segments than what we can * handle, we have to split it. */ pending = (bio_segments(bio) + segs - 1) / segs; split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO); BUG_ON(split_bio == NULL); atomic_set(&split_bio->pending, pending); split_bio->bio = bio; for (i = 0; i < pending; i++) { offset = (i * segs * PAGE_SIZE) >> 9; size = min((unsigned int)(segs * PAGE_SIZE) >> 9, (unsigned int)(bio->bi_size >> 9) - offset); cloned_bio = bio_clone(bio, GFP_NOIO); BUG_ON(cloned_bio == NULL); trim_bio(cloned_bio, offset, size); cloned_bio->bi_private = split_bio; cloned_bio->bi_end_io = split_bio_end; submit_bio(cloned_bio->bi_rw, cloned_bio); } /* * Now we have to wait for all those smaller bios to * end, so we can also end the "parent" bio. */ continue; } /* We don't need to split this bio */ submit_bio(bio->bi_rw, bio); } return 0; } #define SWAP_MODIFY 1 #define SWAP_RESTORE 2 static void modify_swappiness(int mode) { int ret = 0; static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char *modify_argv[] = { "/bin/sh", "/etc/.uvp-monitor/modify_swappiness.sh", "modify", NULL }; static char *restore_argv[] = { "/bin/sh", "/etc/.uvp-monitor/modify_swappiness.sh", "restore", NULL }; if(mode == SWAP_MODIFY) { ret = call_usermodehelper("/bin/sh", modify_argv, envp, UMH_WAIT_PROC); if (ret < 0) { printk(KERN_ERR "Failed to modify_swappiness in SWAP_MODIFY mode,ret(%d).\n",ret); return; } } else if(mode == SWAP_RESTORE) { ret = call_usermodehelper("/bin/sh", restore_argv, envp, UMH_WAIT_PROC); if (ret < 0) { printk(KERN_ERR "Failed to modify_swappiness in SWAP_RESTORE mode,ret(%d).\n",ret); return; } } return; } static int blkfront_suspend(struct xenbus_device *dev, pm_message_t state) { modify_swappiness(SWAP_MODIFY); return 0; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our blkif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
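* Before reconnecting, resume checks whether any of the old ring grant references are still mapped by the backend and skips the reconnect if so.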
*/ static int blkfront_resume(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); int err = 0; enum xenbus_state backend_state; int i; for (i=0; i<info->ring_size; i++) { if (gnttab_query_foreign_access(info->ring_refs[i])) { printk(KERN_ERR "gnttab_query_foreign_access is true,blkfront_resume is break!\n"); goto restore_swap; } } dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); backend_state = xenbus_read_driver_state(dev->otherend); /* See respective comment in blkfront_probe(). */ xenbus_switch_state(dev, XenbusStateInitialising); if (backend_state != XenbusStateInitWait) goto restore_swap; err = talk_to_blkback(dev, info); restore_swap: modify_swappiness(SWAP_RESTORE); /* * We have to wait for the backend to switch to * connected state, since we want to read which * features it supports. */ return err; } static void blkfront_closing(struct blkfront_info *info) { struct xenbus_device *xbdev = info->xbdev; struct block_device *bdev = NULL; int len; char *devname; char *bestate; int iTimeout = 0; devname = xenbus_read(XBT_NIL, xbdev->otherend, "dev", &len); if (IS_ERR(devname)) { printk(KERN_ERR "read %s xenstore error!\n", xbdev->otherend); } else{ xenbus_write(XBT_NIL, "control/uvp", "unplug-disk", devname); kfree(devname); } while(info && info->users != 0) { printk(KERN_INFO "info->users == %d wait %d!\n", info->users, iTimeout); if(iTimeout > UNPLUG_DISK_TMOUT) break; ssleep(1); iTimeout++; } bestate = xenbus_read(XBT_NIL, xbdev->otherend, "state", &len); if (IS_ERR(bestate)){ printk(KERN_ERR "read %s state error!\n", xbdev->otherend); } else{ if(strncmp(bestate, "5", 1) || iTimeout > UNPLUG_DISK_TMOUT) { info->blkif_detach = 0; kfree(bestate); return; } kfree(bestate); } mutex_lock(&info->mutex); if (xbdev->state == XenbusStateClosing) { mutex_unlock(&info->mutex); return; } if (info->gd) bdev = bdget_disk(info->gd, 0); mutex_unlock(&info->mutex); if (!bdev) { xenbus_frontend_closed(xbdev); return; } mutex_lock(&bdev->bd_mutex); if (bdev->bd_openers) { xenbus_dev_error(xbdev, -EBUSY, "Device in use; refusing to close"); } else { xlvbd_release_gendisk(info); xenbus_frontend_closed(xbdev); } mutex_unlock(&bdev->bd_mutex); bdput(bdev); } static int blkfront_setup_indirect(struct blkfront_info *info) { unsigned int indirect_segments, segs; int err, i; struct grant *persistent_gnt; struct grant *n; unsigned int grants_nums = 0; //for debug unsigned int cmp_grants_nums = 0; unsigned int cmp_indirect_pages_nums = 0; info->max_indirect_segments = 0; segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-max-indirect-segments", "%u", &indirect_segments, NULL); if (!err && indirect_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments = min(indirect_segments, xen_blkif_max_segments); segs = info->max_indirect_segments; } printk("[%s:%d], segs %d\n", __func__, __LINE__, segs); grants_nums = (segs + INDIRECT_GREFS(segs)) * RING_SIZE(&info->ring); if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "granted_page(persistent_feature) need not been realloc_page in suspend mode!\n"); if (info->feature_persistent) { if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { persistent_gnt->gref = GRANT_INVALID_REF; cmp_grants_nums++; } printk(KERN_INFO "grants_nums should be %d,cmp_grants_nums is %d in suspend mode when persistent_feature is true!\n", grants_nums, cmp_grants_nums); } }
else { if (!list_empty(&info->grants)) { list_for_each_entry_safe(persistent_gnt, n, &info->grants, node) { persistent_gnt->gref = GRANT_INVALID_REF; persistent_gnt->pfn = 0; cmp_grants_nums++; } printk(KERN_INFO "grants_nums should be %d,cmp_grants_nums is %d in suspend mode when persistent_feature is false!\n", grants_nums, cmp_grants_nums); } } } else { err = fill_grant_buffer(info, grants_nums); if (err) goto out_of_memory; } if (!info->feature_persistent && info->max_indirect_segments) { /* * We are using indirect descriptors but not persistent * grants, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int indirect_pages_nums = INDIRECT_GREFS(segs) * RING_SIZE(&info->ring); if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "indirect_page need not been realloc_page in suspend mode!\n"); if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; BUG_ON(info->feature_persistent); list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { cmp_indirect_pages_nums++; } printk(KERN_INFO "indirect_pages_nums should be %d,cmp_indirect_pages_nums is %d in suspend mode!\n", indirect_pages_nums, cmp_indirect_pages_nums); } } else { BUG_ON(!list_empty(&info->indirect_pages)); for (i = 0; i < indirect_pages_nums; i++) { struct page *indirect_page = alloc_page(GFP_NOIO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &info->indirect_pages); } } } if(info->connected == BLKIF_STATE_SUSPENDED) { printk(KERN_INFO "grants_used,sg and indirect_grants need not been rekzalloc in suspend mode!\n"); for (i = 0; i < RING_SIZE(&info->ring); i++) { memset(info->shadow[i].grants_used, 0, sizeof(info->shadow[i].grants_used[0]) * segs); memset(info->shadow[i].sg, 0, sizeof(info->shadow[i].sg[0]) * segs); if (info->max_indirect_segments) memset(info->shadow[i].indirect_grants, 0, sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs)); /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } } else { for (i = 0; i < RING_SIZE(&info->ring); i++) { info->shadow[i].grants_used = kzalloc( sizeof(info->shadow[i].grants_used[0]) * segs, GFP_NOIO); /* malloc space for every shadow's sg */ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); if (info->max_indirect_segments) info->shadow[i].indirect_grants = kzalloc( sizeof(info->shadow[i].indirect_grants[0]) * INDIRECT_GREFS(segs), GFP_NOIO); if ((info->shadow[i].grants_used == NULL) || (info->shadow[i].sg == NULL) || (info->max_indirect_segments && (info->shadow[i].indirect_grants == NULL))) goto out_of_memory; /* initialise every shadow's sg */ sg_init_table(info->shadow[i].sg, segs); } } return 0; out_of_memory: for (i = 0; i < RING_SIZE(&info->ring); i++) { kfree(info->shadow[i].grants_used); info->shadow[i].grants_used = NULL; /* free every shadow's sg */ kfree(info->shadow[i].sg); info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); } } return -ENOMEM; } /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). 
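* It reads "sectors", "info" and "sector-size", plus the optional feature nodes, from the backend's xenbus directory before allocating the gendisk.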
*/ static void blkfront_connect(struct blkfront_info *info) { unsigned long long sectors; unsigned long sector_size; unsigned int binfo; int err; int barrier; dev_t devt; /* for persistent grant */ int persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: /* * Potentially, the back-end may be signalling * a capacity change; update the capacity. */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "sectors", "%Lu", &sectors); if (XENBUS_EXIST_ERR(err)) return; devt = disk_devt(info->gd); printk(KERN_INFO "Changing capacity of (%u, %u) to %Lu " "sectors\n", (unsigned)MAJOR(devt), (unsigned)MINOR(devt), sectors); set_capacity(info->gd, sectors); revalidate_disk(info->gd); return; case BLKIF_STATE_SUSPENDED: /* * If we are recovering from suspension, we need to wait * for the backend to announce its features before * reconnecting, at least we need to know if the backend * supports indirect descriptors, and how many. */ blkif_recover(info); return; default: /* keep gcc quiet; ISO C99 6.8.4.2p5, 6.8.3p6 */ ; } dev_dbg(&info->xbdev->dev, "%s:%s.\n", __func__, info->xbdev->otherend); err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "sectors", "%llu", &sectors, "info", "%u", &binfo, "sector-size", "%lu", &sector_size, NULL); if (err) { xenbus_dev_fatal(info->xbdev, err, "reading backend fields at %s", info->xbdev->otherend); return; } err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-barrier", "%d", &barrier, NULL); /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes * synchronously; nothing to do. * * If there are barriers, then we use flush. */ info->feature_flush = 0; /* * The driver doesn't properly handle empty flushes, so * let's disable barrier support for now. */ #if 0 if (!err && barrier) info->feature_flush = REQ_FLUSH; #endif err = xenbus_gather(XBT_NIL, info->xbdev->otherend, "feature-persistent", "%u", &persistent, NULL); if (err) info->feature_persistent = 0; else info->feature_persistent = persistent; err = blkfront_setup_indirect(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); return; } err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); return; } xenbus_switch_state(info->xbdev, XenbusStateConnected); /* Kick pending requests. */ spin_lock_irq(&blkif_io_lock); info->connected = BLKIF_STATE_CONNECTED; kick_pending_request_queues(info); spin_unlock_irq(&blkif_io_lock); add_disk(info->gd); info->is_ready = 1; } /** * Callback received when the backend's state changes.
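* InitWait triggers talk_to_blkback(), Connected triggers blkfront_connect(), and Closing hands the unplug off to a "xenwatch_unplugdisk" kthread.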
*/ static void blkback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); printk("blkfront:blkback_changed to state %d.\n", backend_state); dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (talk_to_blkback(dev, info)) { dev_set_drvdata(&dev->dev, NULL); kfree(info); } break; case XenbusStateConnected: blkfront_connect(info); break; case XenbusStateClosing: kthread_run(blkfront_closing, info, "xenwatch_unplugdisk"); break; } } static int blkfront_remove(struct xenbus_device *xbdev) { struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); struct block_device *bdev = NULL; struct gendisk *disk; dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); blkif_free(info, 0); mutex_lock(&info->mutex); disk = info->gd; if (disk) bdev = bdget_disk(disk, 0); info->xbdev = NULL; mutex_unlock(&info->mutex); if (!bdev) { if (!info->users){ kfree(info); } return 0; } /* * The xbdev was removed before we reached the Closed * state. See if it's safe to remove the disk. If the bdev * isn't closed yet, we let release take care of it. */ mutex_lock(&bdev->bd_mutex); info = disk->private_data; if (info && !bdev->bd_openers) { xlvbd_release_gendisk(info); disk->private_data = NULL; kfree(info); } mutex_unlock(&bdev->bd_mutex); bdput(bdev); return 0; } static int blkfront_is_ready(struct xenbus_device *dev) { struct blkfront_info *info = dev_get_drvdata(&dev->dev); return info->is_ready && info->xbdev; } static int blkif_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct blkfront_info *info; int err = 0; info = disk->private_data; if (!info) { /* xbdev gone */ err = -ERESTARTSYS; goto out; } mutex_lock(&info->mutex); if (!info->gd) /* xbdev is closed */ err = -ERESTARTSYS; else info->users++; mutex_unlock(&info->mutex); out: return err; } static int blkif_release(struct gendisk *disk, fmode_t mode) { struct blkfront_info *info = disk->private_data; struct block_device *bdev; struct xenbus_device *xbdev; bdev = bdget_disk(disk, 0); if ( info ){ info->users--; } else { goto out; } /* * Check if we have been instructed to close. We will have * deferred this request, because the bdev was still open. 
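* If the backend has already moved to Closing, the gendisk is released here and the frontend close is completed; on sudden device removal the info structure is freed once the last opener is gone.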
*/ mutex_lock(&info->mutex); xbdev = info->xbdev; if (xbdev && xbdev->state && xbdev->state == XenbusStateClosing) { /* pending switch to state closed */ xlvbd_release_gendisk(info); xenbus_frontend_closed(info->xbdev); } mutex_unlock(&info->mutex); if (!xbdev) { /* sudden device removal */ xlvbd_release_gendisk(info); disk->private_data = NULL; if (!info->users){ kfree(info); } } out: bdput(bdev); return 0; } static const struct block_device_operations xlvbd_block_fops = { .owner = THIS_MODULE, .open = blkif_open, .release = blkif_release, .getgeo = blkif_getgeo, .locked_ioctl = blkif_ioctl, }; static struct xenbus_device_id blkfront_ids[] = { { "vbd" }, { "" } }; static struct xenbus_driver blkfront = { .name = "vbd", .owner = THIS_MODULE, .ids = blkfront_ids, .probe = blkfront_probe, .remove = blkfront_remove, .suspend = blkfront_suspend, .resume = blkfront_resume, .otherend_changed = blkback_changed, .is_ready = blkfront_is_ready, }; static int __init xlblk_init(void) { int ret; if (!xen_domain()) return -ENODEV; if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; printk("%s: register_blkdev major: %d \n", __FUNCTION__, XENVBD_MAJOR); if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", XENVBD_MAJOR, DEV_NAME); return -ENODEV; } if (sda_is_xvda) { emulated_sd_disk_minor_offset = 0; emulated_sd_disk_name_offset = emulated_sd_disk_minor_offset / 256; } ret = xenbus_register_frontend(&blkfront); if (ret) { unregister_blkdev(XENVBD_MAJOR, DEV_NAME); return ret; } return 0; } module_init(xlblk_init); static void __exit xlblk_exit(void) { return xenbus_unregister_driver(&blkfront); } module_exit(xlblk_exit); MODULE_DESCRIPTION("Xen virtual block device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); MODULE_ALIAS("xen:vbd"); MODULE_ALIAS("xenblk"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/000077500000000000000000000000001314037446600252725ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/Makefile000066400000000000000000000001211314037446600267240ustar00rootroot00000000000000include $(M)/config.mk obj-m = xen-netfront.o # xen-vnif-objs := xen-netfront.o UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/debian8/000077500000000000000000000000001314037446600266045ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/debian8/xen-netfront.c000066400000000000000000001706641314037446600314150ustar00rootroot00000000000000/* * Virtual network driver for conversing with remote driver backends. 
* * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Module parameters */ static unsigned int xennet_max_queues; module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) /* Queue name is interface name with "-qNNN" appended */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) struct netfront_stats { u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; struct u64_stats_sync syncp; }; struct netfront_info; struct netfront_queue { unsigned int id; /* Queue ID, 0-based */ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct netfront_info *info; struct napi_struct napi; /* Split event channels support, tx_* == rx_* when using * single event channel. */ unsigned int tx_evtchn, rx_evtchn; unsigned int tx_irq, rx_irq; /* Only used when split event channels support is enabled */ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. 
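* skb_entry_is_link() relies on this: a value below PAGE_OFFSET is a free-list index, anything else is a live skb pointer.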
*/ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct xenbus_device *xbdev; /* Multi-queue support */ struct netfront_queue *queues; /* Statistics */ struct netfront_stats __percpu *stats; atomic_t rx_gso_checksum_fixup; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return (unsigned long)list->skb < PAGE_OFFSET; } /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = queue->rx_skbs[i]; queue->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = queue->grant_rx_ref[i]; queue->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while (0) #endif static bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct netfront_queue *queue = (struct netfront_queue *)data; napi_schedule(&queue->napi); } static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) { struct net_device *dev = queue->info->netdev; struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); if (unlikely(netif_tx_queue_stopped(dev_queue)) && netfront_tx_slot_available(queue) && likely(netif_running(dev))) netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); } static void xennet_alloc_rx_buffers(struct netfront_queue *queue) { unsigned short id; struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = queue->rx.req_prod_pvt; grant_ref_t ref; unsigned 
long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(queue->info->netdev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons); for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(queue->info->netdev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); __skb_queue_tail(&queue->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (queue->rx_target/2)) { if (req_prod > queue->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) && ((queue->rx_target *= 2) > queue->rx_max_target)) queue->rx_target = queue->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&queue->rx_batch); if (skb == NULL) break; skb->dev = queue->info->netdev; id = xennet_rxidx(req_prod + i); BUG_ON(queue->rx_skbs[id]); queue->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&queue->gref_rx_head); BUG_ON((signed short)ref < 0); queue->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&queue->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); /* barrier so backend seens requests */ /* Above is a suitable barrier to ensure backend will see requests. */ queue->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i = 0; struct netfront_queue *queue = NULL; for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); spin_lock_bh(&queue->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(queue); queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) napi_schedule(&queue->napi); } spin_unlock_bh(&queue->rx_lock); } netif_tx_start_all_queues(dev); return 0; } static void xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. 
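* The read barrier orders the load of rsp_prod above against the response reads in the loop below.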
*/ for (cons = queue->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&queue->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = queue->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( queue->grant_tx_ref[id]) != 0)) { pr_alert("%s: warning -- grant still in use by backend domain\n", __func__); BUG(); } gnttab_end_foreign_access_ref( queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; queue->grant_tx_page[id] = NULL; add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); dev_kfree_skb_irq(skb); } queue->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ queue->tx.sring->rsp_event = prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; mb(); /* update shared area */ } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); xennet_maybe_wake_tx(queue); } static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue, struct xen_netif_tx_request *tx) { char *data = skb->data; unsigned long mfn; RING_IDX prod = queue->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; /* While the header overlaps a page boundary (including being larger than a page), split it it into page-sized chunks. */ while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); queue->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&queue->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&queue->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly); queue->grant_tx_page[id] = virt_to_page(data); tx->gref = queue->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; struct page *page = skb_frag_page(frag); len = skb_frag_size(frag); offset = frag->page_offset; /* Data must not cross a page boundary. 
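* A fragment may be a compound page, so the loop below grants and queues it one PAGE_SIZE chunk at a time.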
*/ BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (len > 0) { unsigned long bytes; BUG_ON(offset >= PAGE_SIZE); bytes = PAGE_SIZE - offset; if (bytes > len) bytes = len; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); queue->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&queue->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&queue->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(page)); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly); queue->grant_tx_page[id] = page; tx->gref = queue->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; tx->flags = 0; offset += bytes; len -= bytes; /* Next frame */ if (offset == PAGE_SIZE && len) { BUG_ON(!PageCompound(page)); page++; offset = 0; } } } queue->tx.req_prod_pvt = prod; } /* * Count how many ring slots are required to send the frags of this * skb. Each frag might be a compound page. */ static int xennet_count_skb_frag_slots(struct sk_buff *skb) { int i, frags = skb_shinfo(skb)->nr_frags; int pages = 0; for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); unsigned long offset = frag->page_offset; /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; pages += PFN_UP(offset + size); } return pages; } static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; u16 queue_idx; /* First, check if there is only one queue */ if (num_queues == 1) { queue_idx = 0; } else { hash = skb_get_hash(skb); queue_idx = hash % num_queues; } return queue_idx; } static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct xen_netif_tx_request *tx; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int slots; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; struct netfront_queue *queue = NULL; unsigned int num_queues = dev->real_num_tx_queues; u16 queue_index; /* Drop the packet if no queues are set up */ if (num_queues < 1) goto drop; /* Determine which queue to transmit this SKB on */ queue_index = skb_get_queue_mapping(skb); queue = &np->queues[queue_index]; /* If skb->len is too big for wire format, drop skb and alert * user about misconfiguration.
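* Packets larger than XEN_NETIF_MAX_TX_SIZE cannot be represented in the ring's wire format, so they are dropped rather than truncated.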
*/ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited( "xennet: skb->len = %u, too big for wire format\n", skb->len); goto drop; } slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { net_alert_ratelimited( "xennet: skb rides the rocket: %d slots\n", slots); goto drop; } spin_lock_irqsave(&queue->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&queue->tx_lock, flags); goto drop; } i = queue->tx.req_prod_pvt; id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); queue->tx_skbs[id].skb = skb; tx = RING_GET_REQUEST(&queue->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&queue->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly); queue->grant_tx_page[id] = virt_to_page(data); tx->gref = queue->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ tx->flags |= XEN_NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&queue->tx, ++i); tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? XEN_NETIF_GSO_TYPE_TCPV6 : XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } queue->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, queue, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! 
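* xennet_tx_buf_gc() may reap the request we just queued and free the skb via dev_kfree_skb_irq(), which is why skb->len was copied into the stats above rather than read afterwards.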
*/ xennet_tx_buf_gc(queue); if (!netfront_tx_slot_available(queue)) netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); spin_unlock_irqrestore(&queue->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; struct netfront_queue *queue; netif_tx_stop_all_queues(np->netdev); for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_disable(&queue->napi); } return 0; } static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(queue->rx.req_prod_pvt); BUG_ON(queue->rx_skbs[new]); queue->rx_skbs[new] = skb; queue->grant_rx_ref[new] = ref; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; queue->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_queue *queue, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info *extra; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&queue->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(queue, cons); ref = xennet_get_rx_ref(queue, cons); xennet_move_rx_slot(queue, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); queue->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); grant_ref_t ref = xennet_get_rx_ref(queue, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int slots = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(queue, extras, rp); cons = queue->rx.rsp_cons; } for (;;) { if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(queue, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. 
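* For now the bad response is simply skipped and the packet is accounted as an rx error by the caller.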
*/ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); gnttab_release_grant_reference(&queue->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + slots == rp) { if (net_ratelimit()) dev_warn(dev, "Need more slots\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&queue->rx, cons + slots); skb = xennet_get_rx_skb(queue, cons + slots); ref = xennet_get_rx_ref(queue, cons + slots); slots++; } if (unlikely(slots > max)) { if (net_ratelimit()) dev_warn(dev, "Too many slots\n"); err = -E2BIG; } if (unlikely(err)) queue->rx.rsp_cons = cons + slots; return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) pr_warn("GSO size must not be zero\n"); return -EINVAL; } if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { if (net_ratelimit()) pr_warn("Bad GSO type %d\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static RING_IDX xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response *rx = RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; if (shinfo->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } return cons; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { bool recalculate_partial_csum = false; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { struct netfront_info *np = netdev_priv(dev); atomic_inc(&np->rx_gso_checksum_fixup); skb->ip_summed = CHECKSUM_PARTIAL; recalculate_partial_csum = true; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; return skb_checksum_setup(skb, recalculate_partial_csum); } static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. 
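* eth_type_trans() reads the Ethernet header to fill in skb->protocol and skb->pkt_type, so the header bytes must already sit in the linear area pulled just above.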
*/ skb->protocol = eth_type_trans(skb, queue->info->netdev); skb_reset_network_header(skb); if (checksum_setup(queue->info->netdev, skb)) { kfree_skb(skb); packets_dropped++; queue->info->netdev->stats.rx_errors++; continue; } u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); /* Pass it up. */ napi_gro_receive(&queue->napi, skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); struct net_device *dev = queue->info->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; int err; spin_lock(&queue->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = queue->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = queue->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(queue, &rinfo, rp, &tmpq); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = queue->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); queue->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_shinfo(skb)->frags[0].page_offset = rx->offset; skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); skb->data_len = rx->status; skb->len += rx->status; i = xennet_fill_frags(queue, skb, &tmpq); if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); queue->rx.rsp_cons = ++i; work_done++; } __skb_queue_purge(&errq); work_done -= handle_incoming_queue(queue, &rxq); /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) > ((3*queue->rx_target) / 4)) && (--queue->rx_target < queue->rx_min_target)) queue->rx_target = queue->rx_min_target; xennet_alloc_rx_buffers(queue); if (work_done < budget) { int more_to_do = 0; napi_gro_flush(napi, false); local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (!more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&queue->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; for_each_possible_cpu(cpu) { struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { start = u64_stats_fetch_begin_irq(&stats->syncp); rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static void xennet_release_tx_bufs(struct netfront_queue *queue) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (skb_entry_is_link(&queue->tx_skbs[i])) continue; skb = queue->tx_skbs[i].skb; get_page(queue->grant_tx_page[i]); gnttab_end_foreign_access(queue->grant_tx_ref[i], GNTMAP_readonly, (unsigned long)page_address(queue->grant_tx_page[i])); queue->grant_tx_page[i] = NULL; queue->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_queue *queue) { int id, ref; spin_lock_bh(&queue->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct sk_buff *skb; struct page *page; skb = queue->rx_skbs[id]; if (!skb) continue; ref = queue->grant_rx_ref[id]; if (ref == GRANT_INVALID_REF) continue; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); /* gnttab_end_foreign_access() needs a page ref until * foreign access is ended (which may be deferred). 
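* The get_page() below supplies that reference; the grant layer drops it (and frees the page) once the backend mapping is really gone, while kfree_skb() only releases the skb's own reference.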
*/ get_page(page); gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page)); queue->grant_rx_ref[id] = GRANT_INVALID_REF; kfree_skb(skb); } spin_unlock_bh(&queue->rx_lock); } static void xennet_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; struct netfront_queue *queue; unsigned int i; for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; xennet_release_tx_bufs(queue); xennet_release_rx_bufs(queue); gnttab_free_grant_references(queue->gref_tx_head); gnttab_free_grant_references(queue->gref_rx_head); } } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_IPV6_CSUM) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-ipv6-csum-offload", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_IPV6_CSUM; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } if (features & NETIF_F_TSO6) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv6", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO6; } return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) { struct netfront_queue *queue = dev_id; unsigned long flags; spin_lock_irqsave(&queue->tx_lock, flags); xennet_tx_buf_gc(queue); spin_unlock_irqrestore(&queue->tx_lock, flags); return IRQ_HANDLED; } static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) { struct netfront_queue *queue = dev_id; struct net_device *dev = queue->info->netdev; if (likely(netif_carrier_ok(dev) && RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) napi_schedule(&queue->napi); return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { xennet_tx_interrupt(irq, dev_id); xennet_rx_interrupt(irq, dev_id); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { /* Poll each queue */ struct netfront_info *info = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; for (i = 0; i < num_queues; ++i) xennet_interrupt(0, &info->queues[i]); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_uninit = xennet_uninit, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, .ndo_select_queue = xennet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif }; static struct net_device *xennet_create_dev(struct xenbus_device *dev) { int err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; /* No need to use rtnl_lock() 
before the call below as it * happens before register_netdev(). */ netif_set_real_num_tx_queues(netdev, 0); np->queues = NULL; err = -ENOMEM; np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); if (np->stats == NULL) goto exit; netdev->netdev_ops = &xennet_netdev_ops; netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; netdev->ethtool_ops = &xennet_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); np->netdev = netdev; netif_carrier_off(netdev); return netdev; exit: free_netdev(netdev); return ERR_PTR(err); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = register_netdev(info->netdev); if (err) { pr_warn("%s: register_netdev err=%d\n", __func__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); pr_warn("%s: add sysfs failed err=%d\n", __func__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } static void xennet_disconnect_backend(struct netfront_info *info) { unsigned int i = 0; unsigned int num_queues = info->netdev->real_num_tx_queues; netif_carrier_off(info->netdev); for (i = 0; i < num_queues; ++i) { struct netfront_queue *queue = &info->queues[i]; if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) unbind_from_irqhandler(queue->tx_irq, queue); if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { unbind_from_irqhandler(queue->tx_irq, queue); unbind_from_irqhandler(queue->rx_irq, queue); } queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_irq = queue->rx_irq = 0; if (netif_running(info->netdev)) napi_synchronize(&queue->napi); /* End access and free the pages */ xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring); queue->tx_ring_ref = GRANT_INVALID_REF; queue->rx_ring_ref = GRANT_INVALID_REF; queue->tx.sring = NULL; queue->rx.sring = NULL; } } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
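* The actual reconnection happens later, from netback_changed(): once the backend reaches XenbusStateInitWait again, xennet_connect() rebuilds the rings and renegotiates features.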
*/ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } static int setup_netfront_single(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = bind_evtchn_to_irqhandler(queue->tx_evtchn, xennet_interrupt, 0, queue->info->netdev->name, queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; queue->rx_irq = queue->tx_irq = err; return 0; bind_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront_split(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); if (err < 0) goto alloc_rx_evtchn_fail; snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); err = bind_evtchn_to_irqhandler(queue->tx_evtchn, xennet_tx_interrupt, 0, queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); err = bind_evtchn_to_irqhandler(queue->rx_evtchn, xennet_rx_interrupt, 0, queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; return 0; bind_rx_fail: unbind_from_irqhandler(queue->tx_irq, queue); queue->tx_irq = 0; bind_tx_fail: xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); queue->rx_evtchn = 0; alloc_rx_evtchn_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; int err; queue->tx_ring_ref = GRANT_INVALID_REF; queue->rx_ring_ref = GRANT_INVALID_REF; queue->rx.sring = NULL; queue->tx.sring = NULL; txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) goto grant_tx_ring_fail; queue->tx_ring_ref = err; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto alloc_rx_ring_fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) goto grant_rx_ring_fail; queue->rx_ring_ref = err; if (feature_split_evtchn) err = setup_netfront_split(queue); /* setup single event channel if * a) feature-split-event-channels == 0 * b) feature-split-event-channels == 1 but failed to setup */ if (!feature_split_evtchn || (feature_split_evtchn && err)) err = setup_netfront_single(queue); if (err) goto alloc_evtchn_fail; return 0; /* If we fail to setup netfront, it is 
safe to just revoke access to * granted pages because backend is not accessing it at this point. */ alloc_evtchn_fail: gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); grant_rx_ring_fail: free_page((unsigned long)rxs); alloc_rx_ring_fail: gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); grant_tx_ring_fail: free_page((unsigned long)txs); fail: return err; } /* Queue-specific initialisation * This used to be done in xennet_create_dev() but must now * be run per-queue. */ static int xennet_init_queue(struct netfront_queue *queue) { unsigned short i; int err = 0; spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); skb_queue_head_init(&queue->rx_batch); queue->rx_target = RX_DFL_MIN_TARGET; queue->rx_min_target = RX_DFL_MIN_TARGET; queue->rx_max_target = RX_MAX_TARGET; init_timer(&queue->rx_refill_timer); queue->rx_refill_timer.data = (unsigned long)queue; queue->rx_refill_timer.function = rx_refill_timeout; snprintf(queue->name, sizeof(queue->name), "%s-q%u", queue->info->netdev->name, queue->id); /* Initialise tx_skbs as a free chain containing every entry. */ queue->tx_skb_freelist = 0; for (i = 0; i < NET_TX_RING_SIZE; i++) { skb_entry_set_link(&queue->tx_skbs[i], i+1); queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_page[i] = NULL; } /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { queue->rx_skbs[i] = NULL; queue->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &queue->gref_tx_head) < 0) { pr_alert("can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &queue->gref_rx_head) < 0) { pr_alert("can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } return 0; exit_free_tx: gnttab_free_grant_references(queue->gref_tx_head); exit: return err; } static int write_queue_xenstore_keys(struct netfront_queue *queue, struct xenbus_transaction *xbt, int write_hierarchical) { /* Write the queue-specific keys into XenStore in the traditional * way for a single queue, or in a queue subkeys for multiple * queues. */ struct xenbus_device *dev = queue->info->xbdev; int err; const char *message; char *path; size_t pathsize; /* Choose the correct place to write the keys */ if (write_hierarchical) { pathsize = strlen(dev->nodename) + 10; path = kzalloc(pathsize, GFP_KERNEL); if (!path) { err = -ENOMEM; message = "out of memory while writing ring references"; goto error; } snprintf(path, pathsize, "%s/queue-%u", dev->nodename, queue->id); } else { path = (char *)dev->nodename; } /* Write ring references */ err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", queue->tx_ring_ref); if (err) { message = "writing tx-ring-ref"; goto error; } err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", queue->rx_ring_ref); if (err) { message = "writing rx-ring-ref"; goto error; } /* Write event channels; taking into account both shared * and split event channel scenarios. 
*/ if (queue->tx_evtchn == queue->rx_evtchn) { /* Shared event channel */ err = xenbus_printf(*xbt, path, "event-channel", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel"; goto error; } } else { /* Split event channels */ err = xenbus_printf(*xbt, path, "event-channel-tx", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel-tx"; goto error; } err = xenbus_printf(*xbt, path, "event-channel-rx", "%u", queue->rx_evtchn); if (err) { message = "writing event-channel-rx"; goto error; } } if (write_hierarchical) kfree(path); return 0; error: if (write_hierarchical) kfree(path); xenbus_dev_fatal(dev, err, "%s", message); return err; } static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; rtnl_lock(); for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; if (netif_running(info->netdev)) napi_disable(&queue->napi); netif_napi_del(&queue->napi); } rtnl_unlock(); kfree(info->queues); info->queues = NULL; } static int xennet_create_queues(struct netfront_info *info, unsigned int num_queues) { unsigned int i; int ret; info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL); if (!info->queues) return -ENOMEM; rtnl_lock(); for (i = 0; i < num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; queue->id = i; queue->info = info; ret = xennet_init_queue(queue); if (ret < 0) { dev_warn(&info->netdev->dev, "only created %d queues\n", num_queues); num_queues = i; break; } netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64); if (netif_running(info->netdev)) napi_enable(&queue->napi); } netif_set_real_num_tx_queues(info->netdev, num_queues); rtnl_unlock(); if (num_queues == 0) { dev_err(&info->netdev->dev, "no queues\n"); return -EINVAL; } return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_netback(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; unsigned int feature_split_evtchn; unsigned int i = 0; unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; info->netdev->irq = 0; /* Check if backend supports multiple queues */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "multi-queue-max-queues", "%u", &max_queues); if (err < 0) max_queues = 1; num_queues = min(max_queues, xennet_max_queues); /* Check feature-split-event-channels */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-split-event-channels", "%u", &feature_split_evtchn); if (err < 0) feature_split_evtchn = 0; /* Read mac addr. */ err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } if (info->queues) xennet_destroy_queues(info); err = xennet_create_queues(info, num_queues); if (err < 0) goto destroy_ring; /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = setup_netfront(dev, queue, feature_split_evtchn); if (err) { /* setup_netfront() will tidy up the current * queue on error, but we need to clean up * those already allocated. 
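* Shrinking real_num_tx_queues to i below keeps the destroy_ring path and the upper layers from touching queues that never got a ring or event channel.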
*/ if (i > 0) { rtnl_lock(); netif_set_real_num_tx_queues(info->netdev, i); rtnl_unlock(); goto destroy_ring; } else { goto out; } } } again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } if (num_queues == 1) { err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ if (err) goto abort_transaction_no_dev_fatal; } else { /* Write the number of queues */ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u", num_queues); if (err) { message = "writing multi-queue-num-queues"; goto abort_transaction_no_dev_fatal; } /* Write the keys for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ if (err) goto abort_transaction_no_dev_fatal; } } /* The remaining keys are not queue-specific */ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1"); if (err) { message = "writing feature-ipv6-csum-offload"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_dev_fatal(dev, err, "%s", message); abort_transaction_no_dev_fatal: xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); kfree(info->queues); info->queues = NULL; rtnl_lock(); netif_set_real_num_tx_queues(info->netdev, 0); rtnl_unlock(); out: return err; } static int xennet_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = 0; int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; struct xen_netif_rx_request *req; unsigned int feature_rx_copy; unsigned int j = 0; struct netfront_queue *queue = NULL; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; if (!feature_rx_copy) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return -ENODEV; } err = talk_to_netback(np->xbdev, np); if (err) return err; /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); /* By now, the queue structures have been set up */ for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; /* Step 1: Discard all pending TX packet fragments. */ spin_lock_irq(&queue->tx_lock); xennet_release_tx_bufs(queue); spin_unlock_irq(&queue->tx_lock); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself.
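* Surviving rx skbs are compacted to the front of rx_skbs[] and their pages are granted again to the (possibly restarted) backend before the ring is reused.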
*/ spin_lock_bh(&queue->rx_lock); for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { skb_frag_t *frag; const struct page *page; if (!queue->rx_skbs[i]) continue; skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i); ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i); req = RING_GET_REQUEST(&queue->rx, requeue_idx); frag = &skb_shinfo(skb)->frags[0]; page = skb_frag_page(frag); gnttab_grant_foreign_access_ref( ref, queue->info->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(page)), 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } queue->rx.req_prod_pvt = requeue_idx; spin_unlock_bh(&queue->rx_lock); } /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; notify_remote_via_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) notify_remote_via_irq(queue->rx_irq); spin_lock_irq(&queue->tx_lock); xennet_tx_buf_gc(queue); spin_unlock_irq(&queue->tx_lock); spin_lock_bh(&queue->rx_lock); xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } return 0; } /** * Callback received when the backend's state changes. */ static void netback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_checksum_fixup", offsetof(struct netfront_info, rx_gso_checksum_fixup) }, }; static int xennet_get_sset_count(struct net_device *dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); default: return -EINVAL; } } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { void *np = netdev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static const struct ethtool_ops xennet_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); unsigned int num_queues = netdev->real_num_tx_queues; if (num_queues) return 
sprintf(buf, "%u\n", info->queues[0].rx_min_target); else return sprintf(buf, "%u\n", RX_MIN_TARGET); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); unsigned int num_queues = netdev->real_num_tx_queues; char *endp; unsigned long target; unsigned int i; struct netfront_queue *queue; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; spin_lock_bh(&queue->rx_lock); if (target > queue->rx_max_target) queue->rx_max_target = target; queue->rx_min_target = target; if (target > queue->rx_target) queue->rx_target = target; xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); unsigned int num_queues = netdev->real_num_tx_queues; if (num_queues) return sprintf(buf, "%u\n", info->queues[0].rx_max_target); else return sprintf(buf, "%u\n", RX_MAX_TARGET); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); unsigned int num_queues = netdev->real_num_tx_queues; char *endp; unsigned long target; unsigned int i = 0; struct netfront_queue *queue = NULL; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; spin_lock_bh(&queue->rx_lock); if (target < queue->rx_min_target) queue->rx_min_target = target; queue->rx_max_target = target; if (target < queue->rx_target) queue->rx_target = target; xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); unsigned int num_queues = netdev->real_num_tx_queues; if (num_queues) return sprintf(buf, "%u\n", info->queues[0].rx_target); else return sprintf(buf, "0\n"); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int err; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { err = device_create_file(&netdev->dev, &xennet_attrs[i]); if (err) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return err; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); unsigned int 
num_queues = info->netdev->real_num_tx_queues; struct netfront_queue *queue = NULL; unsigned int i = 0; dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; del_timer_sync(&queue->rx_refill_timer); } if (num_queues) { kfree(info->queues); info->queues = NULL; } free_percpu(info->stats); free_netdev(info->netdev); return 0; } static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = xennet_remove, .resume = netfront_resume, .otherend_changed = netback_changed, ); static int __init netif_init(void) { if (!xen_domain()) return -ENODEV; if (!xen_has_pv_nic_devices()) return -ENODEV; pr_info("Initialising Xen virtual ethernet driver\n"); /* Allow as many queues as there are CPUs, by default */ xennet_max_queues = num_online_cpus(); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_DESCRIPTION("Xen virtual network device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:vif"); MODULE_ALIAS("xennet"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/fedora22/000077500000000000000000000000001314037446600266765ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/fedora22/xen-netfront.c000066400000000000000000001525171314037446600315040ustar00rootroot00000000000000/* * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <net/tcp.h> #include <linux/udp.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/slab.h> #include <net/ip.h> #include <asm/xen/page.h> #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/platform_pci.h> #include <xen/grant_table.h> #include <xen/interface/io/netif.h> #include <xen/interface/memory.h> #include <xen/interface/grant_table.h> /* Module parameters */ static unsigned int xennet_max_queues; module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) /* Minimum number of Rx slots (includes slot for GSO metadata). */ #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) /* Queue name is interface name with "-qNNN" appended */ #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) struct netfront_stats { u64 packets; u64 bytes; struct u64_stats_sync syncp; }; struct netfront_info; struct netfront_queue { unsigned int id; /* Queue ID, 0-based */ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct netfront_info *info; struct napi_struct napi; /* Split event channels support, tx_* == rx_* when using * single event channel. */ unsigned int tx_evtchn, rx_evtchn; unsigned int tx_irq, rx_irq; /* Only used when split event channels support is enabled */ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct xenbus_device *xbdev; /* Multi-queue support */ struct netfront_queue *queues; /* Statistics */ struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; atomic_t rx_gso_checksum_fixup; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return (unsigned long)list->skb < PAGE_OFFSET; } /* * Access macros for acquiring/freeing slots in tx_skbs[].
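* A free slot stores the index of the next free slot in .link; because these indices are below PAGE_OFFSET they can always be told apart from real skb pointers (see the union skb_entry comment above).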
*/ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = queue->rx_skbs[i]; queue->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = queue->grant_rx_ref[i]; queue->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static const struct attribute_group xennet_dev_group; #endif static bool xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct netfront_queue *queue = (struct netfront_queue *)data; napi_schedule(&queue->napi); } static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) { struct net_device *dev = queue->info->netdev; struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); if (unlikely(netif_tx_queue_stopped(dev_queue)) && netfront_tx_slot_available(queue) && likely(netif_running(dev))) netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); } static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) { struct sk_buff *skb; struct page *page; skb = __netdev_alloc_skb(queue->info->netdev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); return NULL; } skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); skb->dev = queue->info->netdev; return skb; } static void xennet_alloc_rx_buffers(struct netfront_queue *queue) { RING_IDX req_prod = queue->rx.req_prod_pvt; int notify; if (unlikely(!netif_carrier_ok(queue->info->netdev))) return; for (req_prod = queue->rx.req_prod_pvt; req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; req_prod++) { struct sk_buff *skb; unsigned short id; grant_ref_t ref; unsigned long pfn; struct xen_netif_rx_request *req; skb = xennet_alloc_one_rx_buffer(queue); if (!skb) break; id = xennet_rxidx(req_prod); BUG_ON(queue->rx_skbs[id]); queue->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&queue->gref_rx_head); BUG_ON((signed short)ref < 0); queue->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); req = RING_GET_REQUEST(&queue->rx, req_prod); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } queue->rx.req_prod_pvt = req_prod; /* Not enough requests? Try again later. 
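* The refill timer fires after HZ/10 and schedules NAPI, so xennet_poll() will retry xennet_alloc_rx_buffers() once pages may be available again.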
*/ if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } wmb(); /* barrier so backend sees requests */ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify) notify_remote_via_irq(queue->rx_irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i = 0; struct netfront_queue *queue = NULL; for (i = 0; i < num_queues && np->queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); spin_lock_bh(&queue->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(queue); queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) napi_schedule(&queue->napi); } spin_unlock_bh(&queue->rx_lock); } netif_tx_start_all_queues(dev); return 0; } static void xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = queue->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&queue->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = queue->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( queue->grant_tx_ref[id]) != 0)) { pr_alert("%s: warning -- grant still in use by backend domain\n", __func__); BUG(); } gnttab_end_foreign_access_ref( queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; queue->grant_tx_page[id] = NULL; add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); dev_kfree_skb_irq(skb); } queue->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get.
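* rsp_event is placed half-way between the responses seen so far and the requests currently queued, so the backend will interrupt us again after roughly half of the outstanding requests have completed.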
*/ queue->tx.sring->rsp_event = prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; mb(); /* update shared area */ } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); xennet_maybe_wake_tx(queue); } static struct xen_netif_tx_request *xennet_make_one_txreq( struct netfront_queue *queue, struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) { unsigned int id; struct xen_netif_tx_request *tx; grant_ref_t ref; len = min_t(unsigned int, PAGE_SIZE - offset, len); id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); BUG_ON((signed short)ref < 0); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, page_to_mfn(page), GNTMAP_readonly); queue->tx_skbs[id].skb = skb; queue->grant_tx_page[id] = page; queue->grant_tx_ref[id] = ref; tx->id = id; tx->gref = ref; tx->offset = offset; tx->size = len; tx->flags = 0; return tx; } static struct xen_netif_tx_request *xennet_make_txreqs( struct netfront_queue *queue, struct xen_netif_tx_request *tx, struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) { /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (len) { tx->flags |= XEN_NETTXF_more_data; tx = xennet_make_one_txreq(queue, skb_get(skb), page, offset, len); page++; offset = 0; len -= tx->size; } return tx; } /* * Count how many ring slots are required to send this skb. Each frag * might be a compound page. */ static int xennet_count_skb_slots(struct sk_buff *skb) { int i, frags = skb_shinfo(skb)->nr_frags; int pages; pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)); for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); unsigned long offset = frag->page_offset; /* Skip unused frames from start of page */ offset &= ~PAGE_MASK; pages += PFN_UP(offset + size); } return pages; } static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { unsigned int num_queues = dev->real_num_tx_queues; u32 hash; u16 queue_idx; /* First, check if there is only one queue */ if (num_queues == 1) { queue_idx = 0; } else { hash = skb_get_hash(skb); queue_idx = hash % num_queues; } return queue_idx; } static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); struct xen_netif_tx_request *tx, *first_tx; unsigned int i; int notify; int slots; struct page *page; unsigned int offset; unsigned int len; unsigned long flags; struct netfront_queue *queue = NULL; unsigned int num_queues = dev->real_num_tx_queues; u16 queue_index; /* Drop the packet if no queues are set up */ if (num_queues < 1) goto drop; /* Determine which queue to transmit this SKB on */ queue_index = skb_get_queue_mapping(skb); queue = &np->queues[queue_index]; /* If skb->len is too big for wire format, drop skb and alert * user about misconfiguration. 
*/ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { net_alert_ratelimited( "xennet: skb->len = %u, too big for wire format\n", skb->len); goto drop; } slots = xennet_count_skb_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", slots, skb->len); if (skb_linearize(skb)) goto drop; } page = virt_to_page(skb->data); offset = offset_in_page(skb->data); len = skb_headlen(skb); spin_lock_irqsave(&queue->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&queue->tx_lock, flags); goto drop; } /* First request for the linear area. */ first_tx = tx = xennet_make_one_txreq(queue, skb, page, offset, len); page++; offset = 0; len -= tx->size; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ tx->flags |= XEN_NETTXF_data_validated; /* Optional extra info after the first request. */ if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? XEN_NETIF_GSO_TYPE_TCPV6 : XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } /* Requests for the rest of the linear area. */ tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); /* Requests for all the frags. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag), frag->page_offset, skb_frag_size(frag)); } /* First request has the packet length. */ first_tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); u64_stats_update_begin(&tx_stats->syncp); tx_stats->bytes += skb->len; tx_stats->packets++; u64_stats_update_end(&tx_stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! 
*/ xennet_tx_buf_gc(queue); if (!netfront_tx_slot_available(queue)) netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); spin_unlock_irqrestore(&queue->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; struct netfront_queue *queue; netif_tx_stop_all_queues(np->netdev); for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_disable(&queue->napi); } return 0; } static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(queue->rx.req_prod_pvt); BUG_ON(queue->rx_skbs[new]); queue->rx_skbs[new] = skb; queue->grant_rx_ref[new] = ref; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; queue->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_queue *queue, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info *extra; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&queue->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(queue, cons); ref = xennet_get_rx_ref(queue, cons); xennet_move_rx_slot(queue, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); queue->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); grant_ref_t ref = xennet_get_rx_ref(queue, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int slots = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(queue, extras, rp); cons = queue->rx.rsp_cons; } for (;;) { if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(queue, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backend. 
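 *
 * A valid reference, by contrast, is simply retired below:
 * gnttab_end_foreign_access_ref() revokes the backend's mapping that
 * was granted when the rx slot was posted, and
 * gnttab_release_grant_reference() returns the reference to
 * gref_rx_head so it can be reused for a future rx buffer.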
*/ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); gnttab_release_grant_reference(&queue->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + slots == rp) { if (net_ratelimit()) dev_warn(dev, "Need more slots\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&queue->rx, cons + slots); skb = xennet_get_rx_skb(queue, cons + slots); ref = xennet_get_rx_ref(queue, cons + slots); slots++; } if (unlikely(slots > max)) { if (net_ratelimit()) dev_warn(dev, "Too many slots\n"); err = -E2BIG; } if (unlikely(err)) queue->rx.rsp_cons = cons + slots; return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) pr_warn("GSO size must not be zero\n"); return -EINVAL; } if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { if (net_ratelimit()) pr_warn("Bad GSO type %d\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static RING_IDX xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response *rx = RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; if (shinfo->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } return cons; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { bool recalculate_partial_csum = false; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { struct netfront_info *np = netdev_priv(dev); atomic_inc(&np->rx_gso_checksum_fixup); skb->ip_summed = CHECKSUM_PARTIAL; recalculate_partial_csum = true; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; return skb_checksum_setup(skb, recalculate_partial_csum); } static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { int pull_to = NETFRONT_SKB_CB(skb)->pull_to; if (pull_to > skb_headlen(skb)) __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. 
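 *
 * The __pskb_pull_tail() just above copies up to pull_to bytes (the
 * value stashed in NETFRONT_SKB_CB(skb)->pull_to by xennet_poll(),
 * capped at RX_COPY_THRESHOLD) from the first frag into the linear
 * area, so eth_type_trans() and the checksum fix-up below can parse
 * the headers from contiguous memory.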
*/ skb->protocol = eth_type_trans(skb, queue->info->netdev); skb_reset_network_header(skb); if (checksum_setup(queue->info->netdev, skb)) { kfree_skb(skb); packets_dropped++; queue->info->netdev->stats.rx_errors++; continue; } u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += skb->len; u64_stats_update_end(&rx_stats->syncp); /* Pass it up. */ napi_gro_receive(&queue->napi, skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); struct net_device *dev = queue->info->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; int err; spin_lock(&queue->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = queue->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = queue->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(queue, &rinfo, rp, &tmpq); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = queue->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); queue->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->pull_to = rx->status; if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; skb_shinfo(skb)->frags[0].page_offset = rx->offset; skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); skb->data_len = rx->status; skb->len += rx->status; i = xennet_fill_frags(queue, skb, &tmpq); if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); queue->rx.rsp_cons = ++i; work_done++; } __skb_queue_purge(&errq); work_done -= handle_incoming_queue(queue, &rxq); xennet_alloc_rx_buffers(queue); if (work_done < budget) { int more_to_do = 0; napi_complete(napi); RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); if (more_to_do) napi_schedule(napi); } spin_unlock(&queue->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; for_each_possible_cpu(cpu) { struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { start = u64_stats_fetch_begin_irq(&tx_stats->syncp); tx_packets = tx_stats->packets; tx_bytes = tx_stats->bytes; } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); do { start = u64_stats_fetch_begin_irq(&rx_stats->syncp); rx_packets = rx_stats->packets; rx_bytes = rx_stats->bytes; } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static void xennet_release_tx_bufs(struct netfront_queue *queue) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (skb_entry_is_link(&queue->tx_skbs[i])) continue; skb = queue->tx_skbs[i].skb; gnttab_end_foreign_access_ref( queue->grant_tx_ref[i], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[i]); queue->grant_tx_page[i] = NULL; queue->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_queue *queue) { int id, ref; spin_lock_bh(&queue->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct sk_buff *skb; struct page *page; skb = queue->rx_skbs[id]; if (!skb) continue; ref = queue->grant_rx_ref[id]; if (ref == GRANT_INVALID_REF) continue; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); /* gnttab_end_foreign_access() needs a page ref until * foreign access is ended (which may be deferred). 
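 *
 * In other words: if the backend still has the grant mapped,
 * gnttab_end_foreign_access() defers freeing the page rather than
 * returning it immediately, and the extra reference taken with
 * get_page() below keeps kfree_skb() from handing the page back to
 * the allocator while it may still be foreign-mapped.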
*/ get_page(page); gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page)); queue->grant_rx_ref[id] = GRANT_INVALID_REF; kfree_skb(skb); } spin_unlock_bh(&queue->rx_lock); } static netdev_features_t xennet_fix_features(struct net_device *dev, netdev_features_t features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_IPV6_CSUM) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-ipv6-csum-offload", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_IPV6_CSUM; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } if (features & NETIF_F_TSO6) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv6", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO6; } return features; } static int xennet_set_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) { struct netfront_queue *queue = dev_id; unsigned long flags; spin_lock_irqsave(&queue->tx_lock, flags); xennet_tx_buf_gc(queue); spin_unlock_irqrestore(&queue->tx_lock, flags); return IRQ_HANDLED; } static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) { struct netfront_queue *queue = dev_id; struct net_device *dev = queue->info->netdev; if (likely(netif_carrier_ok(dev) && RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) napi_schedule(&queue->napi); return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { xennet_tx_interrupt(irq, dev_id); xennet_rx_interrupt(irq, dev_id); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { /* Poll each queue */ struct netfront_info *info = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; for (i = 0; i < num_queues; ++i) xennet_interrupt(0, &info->queues[i]); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, .ndo_select_queue = xennet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif }; static void xennet_free_netdev(struct net_device *netdev) { struct netfront_info *np = netdev_priv(netdev); free_percpu(np->rx_stats); free_percpu(np->tx_stats); free_netdev(netdev); } static struct net_device *xennet_create_dev(struct xenbus_device *dev) { int err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); if (!netdev) return ERR_PTR(-ENOMEM); np = netdev_priv(netdev); np->xbdev = dev; /* No need to use rtnl_lock() before the call below as it * happens before register_netdev(). 
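 *
 * real_num_tx_queues is only parked at zero here; the negotiated queue
 * count is installed later by xennet_create_queues(), which does take
 * rtnl_lock() around netif_set_real_num_tx_queues() because by that
 * point the netdev may already be registered.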
*/ netdev->real_num_tx_queues = 0; np->queues = NULL; err = -ENOMEM; np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); if (np->rx_stats == NULL) goto exit; np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); if (np->tx_stats == NULL) goto exit; netdev->netdev_ops = &xennet_netdev_ops; netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; netdev->ethtool_ops = &xennet_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netif_carrier_off(netdev); return netdev; exit: xennet_free_netdev(netdev); return ERR_PTR(err); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif err = register_netdev(info->netdev); if (err) { pr_warn("%s: register_netdev err=%d\n", __func__, err); goto fail; } return 0; fail: xennet_free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } static void xennet_disconnect_backend(struct netfront_info *info) { unsigned int i = 0; unsigned int num_queues = info->netdev->real_num_tx_queues; netif_carrier_off(info->netdev); for (i = 0; i < num_queues && info->queues; ++i) { struct netfront_queue *queue = &info->queues[i]; if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) unbind_from_irqhandler(queue->tx_irq, queue); if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { unbind_from_irqhandler(queue->tx_irq, queue); unbind_from_irqhandler(queue->rx_irq, queue); } queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_irq = queue->rx_irq = 0; if (netif_running(info->netdev)) napi_synchronize(&queue->napi); xennet_release_tx_bufs(queue); xennet_release_rx_bufs(queue); gnttab_free_grant_references(queue->gref_tx_head); gnttab_free_grant_references(queue->gref_rx_head); /* End access and free the pages */ xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring); queue->tx_ring_ref = GRANT_INVALID_REF; queue->rx_ring_ref = GRANT_INVALID_REF; queue->tx.sring = NULL; queue->rx.sring = NULL; } } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. 
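 *
 * Note that netfront_resume() below only performs the teardown half;
 * the rebuild is driven by netback_changed() further down, which calls
 * xennet_connect() (and through it talk_to_netback()) once the
 * restarted backend reports XenbusStateInitWait.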
*/ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } static int setup_netfront_single(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = bind_evtchn_to_irqhandler(queue->tx_evtchn, xennet_interrupt, 0, queue->info->netdev->name, queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; queue->rx_irq = queue->tx_irq = err; return 0; bind_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront_split(struct netfront_queue *queue) { int err; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); if (err < 0) goto fail; err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); if (err < 0) goto alloc_rx_evtchn_fail; snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); err = bind_evtchn_to_irqhandler(queue->tx_evtchn, xennet_tx_interrupt, 0, queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); err = bind_evtchn_to_irqhandler(queue->rx_evtchn, xennet_rx_interrupt, 0, queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; return 0; bind_rx_fail: unbind_from_irqhandler(queue->tx_irq, queue); queue->tx_irq = 0; bind_tx_fail: xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); queue->rx_evtchn = 0; alloc_rx_evtchn_fail: xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); queue->tx_evtchn = 0; fail: return err; } static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; int err; queue->tx_ring_ref = GRANT_INVALID_REF; queue->rx_ring_ref = GRANT_INVALID_REF; queue->rx.sring = NULL; queue->tx.sring = NULL; txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) goto grant_tx_ring_fail; queue->tx_ring_ref = err; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto alloc_rx_ring_fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) goto grant_rx_ring_fail; queue->rx_ring_ref = err; if (feature_split_evtchn) err = setup_netfront_split(queue); /* setup single event channel if * a) feature-split-event-channels == 0 * b) feature-split-event-channels == 1 but failed to setup */ if (!feature_split_evtchn || (feature_split_evtchn && err)) err = setup_netfront_single(queue); if (err) goto alloc_evtchn_fail; return 0; /* If we fail to setup netfront, it is 
safe to just revoke access to * granted pages because backend is not accessing it at this point. */ alloc_evtchn_fail: gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); grant_rx_ring_fail: free_page((unsigned long)rxs); alloc_rx_ring_fail: gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); grant_tx_ring_fail: free_page((unsigned long)txs); fail: return err; } /* Queue-specific initialisation * This used to be done in xennet_create_dev() but must now * be run per-queue. */ static int xennet_init_queue(struct netfront_queue *queue) { unsigned short i; int err = 0; spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); init_timer(&queue->rx_refill_timer); queue->rx_refill_timer.data = (unsigned long)queue; queue->rx_refill_timer.function = rx_refill_timeout; snprintf(queue->name, sizeof(queue->name), "%s-q%u", queue->info->netdev->name, queue->id); /* Initialise tx_skbs as a free chain containing every entry. */ queue->tx_skb_freelist = 0; for (i = 0; i < NET_TX_RING_SIZE; i++) { skb_entry_set_link(&queue->tx_skbs[i], i+1); queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_page[i] = NULL; } /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { queue->rx_skbs[i] = NULL; queue->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, &queue->gref_tx_head) < 0) { pr_alert("can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, &queue->gref_rx_head) < 0) { pr_alert("can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } return 0; exit_free_tx: gnttab_free_grant_references(queue->gref_tx_head); exit: return err; } static int write_queue_xenstore_keys(struct netfront_queue *queue, struct xenbus_transaction *xbt, int write_hierarchical) { /* Write the queue-specific keys into XenStore in the traditional * way for a single queue, or in a queue subkeys for multiple * queues. */ struct xenbus_device *dev = queue->info->xbdev; int err; const char *message; char *path; size_t pathsize; /* Choose the correct place to write the keys */ if (write_hierarchical) { pathsize = strlen(dev->nodename) + 10; path = kzalloc(pathsize, GFP_KERNEL); if (!path) { err = -ENOMEM; message = "out of memory while writing ring references"; goto error; } snprintf(path, pathsize, "%s/queue-%u", dev->nodename, queue->id); } else { path = (char *)dev->nodename; } /* Write ring references */ err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", queue->tx_ring_ref); if (err) { message = "writing tx-ring-ref"; goto error; } err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", queue->rx_ring_ref); if (err) { message = "writing rx-ring-ref"; goto error; } /* Write event channels; taking into account both shared * and split event channel scenarios. 
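 *
 * Sketch of the resulting xenstore layout (paths illustrative): with a
 * single queue the keys sit directly under the device node, e.g.
 * <nodename>/tx-ring-ref and <nodename>/rx-ring-ref; with multiple
 * queues write_hierarchical is set and the same keys go under
 * <nodename>/queue-0, <nodename>/queue-1, and so on, followed by
 * either a shared "event-channel" or the split "event-channel-tx" and
 * "event-channel-rx" pair written just below.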
*/ if (queue->tx_evtchn == queue->rx_evtchn) { /* Shared event channel */ err = xenbus_printf(*xbt, path, "event-channel", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel"; goto error; } } else { /* Split event channels */ err = xenbus_printf(*xbt, path, "event-channel-tx", "%u", queue->tx_evtchn); if (err) { message = "writing event-channel-tx"; goto error; } err = xenbus_printf(*xbt, path, "event-channel-rx", "%u", queue->rx_evtchn); if (err) { message = "writing event-channel-rx"; goto error; } } if (write_hierarchical) kfree(path); return 0; error: if (write_hierarchical) kfree(path); xenbus_dev_fatal(dev, err, "%s", message); return err; } static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; rtnl_lock(); for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; if (netif_running(info->netdev)) napi_disable(&queue->napi); del_timer_sync(&queue->rx_refill_timer); netif_napi_del(&queue->napi); } info->netdev->real_num_tx_queues = 0; rtnl_unlock(); kfree(info->queues); info->queues = NULL; } static int xennet_create_queues(struct netfront_info *info, unsigned int num_queues) { unsigned int i; int ret; info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL); if (!info->queues) return -ENOMEM; rtnl_lock(); for (i = 0; i < num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; queue->id = i; queue->info = info; ret = xennet_init_queue(queue); if (ret < 0) { dev_warn(&info->netdev->dev, "only created %d queues\n", i); num_queues = i; break; } netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64); if (netif_running(info->netdev)) napi_enable(&queue->napi); } netif_set_real_num_tx_queues(info->netdev, num_queues); rtnl_unlock(); if (num_queues == 0) { dev_err(&info->netdev->dev, "no queues\n"); return -EINVAL; } return 0; } /* Common code used when first setting up, and when resuming. */ static int talk_to_netback(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; unsigned int feature_split_evtchn; unsigned int i = 0; unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; info->netdev->irq = 0; /* Check if backend supports multiple queues */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "multi-queue-max-queues", "%u", &max_queues); if (err < 0) max_queues = 1; num_queues = min(max_queues, xennet_max_queues); /* Check feature-split-event-channels */ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, "feature-split-event-channels", "%u", &feature_split_evtchn); if (err < 0) feature_split_evtchn = 0; /* Read mac addr. */ err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } if (info->queues) xennet_destroy_queues(info); err = xennet_create_queues(info, num_queues); if (err < 0) goto destroy_ring; /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = setup_netfront(dev, queue, feature_split_evtchn); if (err) { /* setup_netfront() will tidy up the current * queue on error, but we need to clean up * those already allocated. 
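 *
 * Trimming real_num_tx_queues to i below means the destroy_ring path,
 * which walks info->queues via xennet_disconnect_backend(), only
 * touches the queues whose rings and event channels were actually set
 * up; the queue that failed has already been unwound by
 * setup_netfront() itself.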
*/ if (i > 0) { rtnl_lock(); netif_set_real_num_tx_queues(info->netdev, i); rtnl_unlock(); goto destroy_ring; } else { goto out; } } } again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } if (num_queues == 1) { err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ if (err) goto abort_transaction_no_dev_fatal; } else { /* Write the number of queues */ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u", num_queues); if (err) { message = "writing multi-queue-num-queues"; goto abort_transaction_no_dev_fatal; } /* Write the keys for each queue */ for (i = 0; i < num_queues; ++i) { queue = &info->queues[i]; err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ if (err) goto abort_transaction_no_dev_fatal; } } /* The remaining keys are not queue-specific */ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); if (err) { message = "writing feature-gso-tcpv6"; goto abort_transaction; } err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1"); if (err) { message = "writing feature-ipv6-csum-offload"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_dev_fatal(dev, err, "%s", message); abort_transaction_no_dev_fatal: xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); kfree(info->queues); info->queues = NULL; rtnl_lock(); info->netdev->real_num_tx_queues = 0; rtnl_unlock(); out: return err; } static int xennet_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); unsigned int num_queues = 0; int err; unsigned int feature_rx_copy; unsigned int j = 0; struct netfront_queue *queue = NULL; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; if (!feature_rx_copy) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return -ENODEV; } err = talk_to_netback(np->xbdev, np); if (err) return err; /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); /* * All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. 
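 *
 * Worked example of the queue-count negotiation performed by
 * talk_to_netback() (illustrative numbers): if the backend advertises
 * multi-queue-max-queues = 8 and xennet_max_queues is 4 (it defaults
 * to num_online_cpus() in netif_init()), min(8, 4) = 4 queues are
 * created; if the key is missing the driver falls back to one queue,
 * and dev->real_num_tx_queues read just above reflects the final
 * count.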
*/ netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; notify_remote_via_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) notify_remote_via_irq(queue->rx_irq); spin_lock_irq(&queue->tx_lock); xennet_tx_buf_gc(queue); spin_unlock_irq(&queue->tx_lock); spin_lock_bh(&queue->rx_lock); xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } return 0; } /** * Callback received when the backend's state changes. */ static void netback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); break; case XenbusStateConnected: netdev_notify_peers(netdev); break; case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_checksum_fixup", offsetof(struct netfront_info, rx_gso_checksum_fixup) }, }; static int xennet_get_sset_count(struct net_device *dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); default: return -EINVAL; } } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { void *np = netdev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static const struct ethtool_ops xennet_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); } static ssize_t store_rxbuf(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; /* rxbuf_min and rxbuf_max are no longer configurable. 
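 *
 * The attribute is kept only for compatibility: store_rxbuf() still
 * requires CAP_NET_ADMIN and a parsable number, but otherwise ignores
 * the value, and show_rxbuf() always reports the fixed
 * NET_RX_RING_SIZE.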
*/ return len; } static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf); static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf); static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL); static struct attribute *xennet_dev_attrs[] = { &dev_attr_rxbuf_min.attr, &dev_attr_rxbuf_max.attr, &dev_attr_rxbuf_cur.attr, NULL }; static const struct attribute_group xennet_dev_group = { .attrs = xennet_dev_attrs }; #endif /* CONFIG_SYSFS */ static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); unregister_netdev(info->netdev); if (info->queues) xennet_destroy_queues(info); xennet_free_netdev(info->netdev); return 0; } static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; static struct xenbus_driver netfront_driver = { .ids = netfront_ids, .probe = netfront_probe, .remove = xennet_remove, .resume = netfront_resume, .otherend_changed = netback_changed, }; static int __init netif_init(void) { if (!xen_domain()) return -ENODEV; if (!xen_has_pv_nic_devices()) return -ENODEV; pr_info("Initialising Xen virtual ethernet driver\n"); /* Allow as many queues as there are CPUs, by default */ xennet_max_queues = num_online_cpus(); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_DESCRIPTION("Xen virtual network device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:vif"); MODULE_ALIAS("xennet"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/oracle6/000077500000000000000000000000001314037446600266255ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/oracle6/xen-netfront.c000066400000000000000000001423641314037446600314320ustar00rootroot00000000000000/* * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) struct netfront_stats { u64 rx_packets; u64 tx_packets; u64 rx_bytes; u64 tx_bytes; struct u64_stats_sync syncp; }; struct netfront_info { struct list_head list; struct net_device *netdev; struct napi_struct napi; unsigned int evtchn; struct xenbus_device *xbdev; spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; /* Receive-ring batched refills. */ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ struct netfront_stats __percpu *stats; unsigned long rx_gso_checksum_fixup; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return (unsigned long)list->skb < PAGE_OFFSET; } /* * Access macros for acquiring freeing slots in tx_skbs[]. 
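 *
 * Freelist encoding: each tx_skbs[] entry is a union of an skb pointer
 * and a free-list index.  Kernel pointers are always at or above
 * PAGE_OFFSET while indices are small integers, so skb_entry_is_link()
 * above can tell the two apart with one comparison;
 * add_id_to_freelist() below pushes an id by storing the previous head
 * in that slot's link field, and get_id_from_freelist() pops it again.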
*/ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while (0) #endif static int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); napi_schedule(&np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) { return (np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); } static void xennet_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev))) netif_wake_queue(dev); } static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. 
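 *
 * Worked example (illustrative numbers): with rx_target = 64, if fewer
 * than 64/4 = 16 posted requests are still outstanding when we refill,
 * the target doubles to 128, capped at rx_max_target.  The matching
 * decrease lives in xennet_poll(), which steps rx_target down towards
 * rx_min_target when a poll finds most of the target still unanswered,
 * giving the exponential-increase, linear-decrease behaviour.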
*/ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); /* barrier so backend seens requests */ /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) napi_schedule(&np->napi); } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void xennet_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netif_carrier_ok(dev)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "xennet_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); /* update shared area */ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); xennet_maybe_wake_tx(dev); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct xen_netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; /* While the header overlaps a page boundary (including being larger than a page), split it it into page-sized chunks. 
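 *
 * Worked example (illustrative numbers, PAGE_SIZE = 4096): a 6000-byte
 * header that starts at in-page offset 1000 becomes two requests, the
 * first covering the 3096 bytes up to the end of that page and the
 * second the remaining 2904 bytes at offset 0 of the next page; each
 * chunk gets its own id from the tx freelist and its own grant
 * reference from gnttab_claim_grant_reference().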
*/ while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); struct xen_netif_tx_request *tx; struct xen_netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; extra = NULL; tx->flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. 
*/ tx->flags |= XEN_NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irqrestore(&np->tx_lock, flags); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info *extra; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. In future this should flag the bad * situation to the system controller to reboot the backed. 
*/ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) dev_warn(dev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) dev_warn(dev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) printk(KERN_WARNING "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int checksum_setup(struct net_device *dev, struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; int recalculate_partial_csum = 0; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { struct netfront_info *np = netdev_priv(dev); np->rx_gso_checksum_fixup++; skb->ip_summed = CHECKSUM_PARTIAL; recalculate_partial_csum = 1; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. 
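 *
 * For CHECKSUM_PARTIAL packets the code below points csum_start at the
 * transport header and csum_offset at the TCP or UDP checksum field;
 * when the GSO fix-up path was taken it also re-seeds that field with
 * the complemented pseudo-header sum via csum_tcpudp_magic(), so the
 * device-independent code can complete the checksum later.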
*/ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); if (recalculate_partial_csum) { struct tcphdr *tcph = (struct tcphdr *)th; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - iph->ihl*4, IPPROTO_TCP, 0); } break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); if (recalculate_partial_csum) { struct udphdr *udph = (struct udphdr *)th; udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - iph->ihl*4, IPPROTO_UDP, 0); } break; default: if (net_ratelimit()) printk(KERN_ERR "Attempting to checksum a non-" "TCP/UDP packet, dropping a protocol" " %d packet", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static int handle_incoming_queue(struct net_device *dev, struct sk_buff_head *rxq) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *stats = this_cpu_ptr(np->stats); int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); if (checksum_setup(dev, skb)) { kfree_skb(skb); packets_dropped++; dev->stats.rx_errors++; continue; } u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); /* Pass it up. */ netif_receive_skb(skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int err; spin_lock(&np->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
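 *
 * The rmb() pairs with the write barrier the backend issues before
 * publishing rsp_prod: 'rp' is sampled once, and the barrier ensures
 * the response slots read below are at least as new as that index, so
 * a single poll pass never chases a moving rsp_prod.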
*/ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize approximates the size of true data plus * any supervisor overheads. Adding hypervisor * overheads has been shown to significantly reduce * achievable bandwidth with the default receive * buffer size. It is therefore not wise to account * for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set * to RX_COPY_THRESHOLD + the supervisor * overheads. Here, we add the size of the data pulled * in xennet_fill_frags(). * * We also adjust for any unused space in the main * data area by subtracting (RX_COPY_THRESHOLD - * len). This is especially important with drivers * which split incoming packets into header and data, * using only 66 bytes of the main data area (see the * e1000 driver for example.) On such systems, * without this last adjustement, our achievable * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } __skb_queue_purge(&errq); work_done -= handle_incoming_queue(dev, &rxq); /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. */ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; xennet_alloc_rx_buffers(dev); if (work_done < budget) { int more_to_do = 0; local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 
65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) { struct netfront_info *np = netdev_priv(dev); int cpu; for_each_possible_cpu(cpu) { struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { start = u64_stats_fetch_begin_bh(&stats->syncp); rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; tot->rx_bytes += rx_bytes; tot->tx_bytes += tx_bytes; } tot->rx_errors = dev->stats.rx_errors; tot->tx_dropped = dev->stats.tx_dropped; return tot; } static void xennet_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (skb_entry_is_link(&np->tx_skbs[i])) continue; skb = np->tx_skbs[i].skb; gnttab_end_foreign_access_ref(np->grant_tx_ref[i], GNTMAP_readonly); gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[i]); np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_info *np) { struct mmu_update *mmu = np->rx_mmu; struct multicall_entry *mcl = np->rx_mcl; struct sk_buff_head free_list; struct sk_buff *skb; unsigned long mfn; int xfer = 0, noxfer = 0, unused = 0; int id, ref; dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", __func__); return; skb_queue_head_init(&free_list); spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { ref = np->grant_rx_ref[id]; if (ref == GRANT_INVALID_REF) { unused++; continue; } skb = np->rx_skbs[id]; mfn = gnttab_end_foreign_transfer_ref(ref); gnttab_release_grant_reference(&np->gref_rx_head, ref); np->grant_rx_ref[id] = GRANT_INVALID_REF; if (0 == mfn) { skb_shinfo(skb)->nr_frags = 0; dev_kfree_skb(skb); noxfer++; continue; } if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Remap the page. */ struct page *page = skb_shinfo(skb)->frags[0].page; unsigned long pfn = page_to_pfn(page); void *vaddr = page_address(page); MULTI_update_va_mapping(mcl, (unsigned long)vaddr, mfn_pte(mfn, PAGE_KERNEL), 0); mcl++; mmu->ptr = ((u64)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; mmu->val = pfn; mmu++; set_phys_to_machine(pfn, mfn); } __skb_queue_tail(&free_list, skb); xfer++; } dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", __func__, xfer, noxfer, unused); if (xfer) { if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Do all the remapping work and M2P updates. 
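* The va-mapping multicalls queued above and this final MULTI_mmu_update
* (the M2P updates for the transferred pages) are submitted as a single
* batched hypercall via HYPERVISOR_multicall().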
*/ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, NULL, DOMID_SELF); mcl++; HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); } } __skb_queue_purge(&free_list); spin_unlock_bh(&np->rx_lock); } static void xennet_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); xennet_release_tx_bufs(np); xennet_release_rx_bufs(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static u32 xennet_fix_features(struct net_device *dev, u32 features) { struct netfront_info *np = netdev_priv(dev); int val; if (features & NETIF_F_SG) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_SG; } if (features & NETIF_F_TSO) { if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4", "%d", &val) < 0) val = 0; if (!val) features &= ~NETIF_F_TSO; } return features; } static int xennet_set_features(struct net_device *dev, u32 features) { if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { netdev_info(dev, "Reducing MTU because no SG offload"); dev->mtu = ETH_DATA_LEN; } return 0; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netif_carrier_ok(dev))) { xennet_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) napi_schedule(&np->napi); } spin_unlock_irqrestore(&np->tx_lock, flags); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void xennet_poll_controller(struct net_device *dev) { xennet_interrupt(0, dev); } #endif static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_uninit = xennet_uninit, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_get_stats64 = xennet_get_stats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif }; static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) { int i, err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __func__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; err = -ENOMEM; np->stats = alloc_percpu(struct netfront_stats); if (np->stats == NULL) goto exit; /* Initialise tx_skbs as a free chain containing every entry. 
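* A free entry stores the index of the next free slot in skb_entry.link;
* such indices are always below PAGE_OFFSET, which is how
* skb_entry_is_link() tells them apart from real skb pointers.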
*/ np->tx_skb_freelist = 0; for (i = 0; i < NET_TX_RING_SIZE; i++) { skb_entry_set_link(&np->tx_skbs[i], i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit_free_stats; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, xennet_poll, 64); netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_ROBUST; netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; /* * Assume that all hw features are available for now. This set * will be adjusted by the call to netdev_update_features() in * xennet_connect() which is the earliest point where we can * negotiate with the backend regarding supported features. */ netdev->features |= netdev->hw_features; SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netif_carrier_off(netdev); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit_free_stats: free_percpu(np->stats); exit: free_netdev(netdev); return ERR_PTR(err); } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]); static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct netfront_info *info; char mac_addr[ETH_ALEN]; err = xen_net_read_mac(dev, mac_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __func__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __func__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); out: return err; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } static void xennet_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. 
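* Both rx_lock and tx_lock are taken while the carrier is cleared so no
* transmit or poll path can still be touching the rings when they are
* unmapped and freed below.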
*/ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netif_carrier_off(info->netdev); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->netdev->irq) unbind_from_irqhandler(info->netdev->irq, info->netdev); info->evtchn = info->netdev->irq = 0; /* End access and free the pages */ xennet_end_access(info->tx_ring_ref, info->tx.sring); xennet_end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); return 0; } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) { struct xen_netif_tx_sring *txs; struct xen_netif_rx_sring *rxs; int err; struct net_device *netdev = info->netdev; info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->rx.sring = NULL; info->tx.sring = NULL; netdev->irq = 0; err = xen_net_read_mac(dev, netdev->dev_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto fail; } txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!txs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating tx ring page"); goto fail; } SHARED_RING_INIT(txs); FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(txs)); if (err < 0) { free_page((unsigned long)txs); goto fail; } info->tx_ring_ref = err; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); if (err < 0) { free_page((unsigned long)rxs); goto fail; } info->rx_ring_ref = err; err = xenbus_alloc_evtchn(dev, &info->evtchn); if (err) goto fail; err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, 0, netdev->name, netdev); if (err < 0) goto fail; netdev->irq = err; return 0; fail: return err; } /* Common code used when first setting up, and when resuming. */ static int talk_to_netback(struct xenbus_device *dev, struct netfront_info *info) { const char *message; struct xenbus_transaction xbt; int err; /* Create shared ring, alloc event channel. 
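* setup_netfront() grants the tx/rx ring pages and binds the event
* channel; the xenbus transaction below then publishes the ring refs,
* event channel and feature flags so the backend can connect to them.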
*/ err = setup_netfront(dev, info); if (err) goto out; again: err = xenbus_transaction_start(&xbt); if (err) { xenbus_dev_fatal(dev, err, "starting transaction"); goto destroy_ring; } err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", info->tx_ring_ref); if (err) { message = "writing tx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", info->rx_ring_ref); if (err) { message = "writing rx ring-ref"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", info->evtchn); if (err) { message = "writing event-channel"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1); if (err) { message = "writing request-rx-copy"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); if (err) { message = "writing feature-rx-notify"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); if (err) { message = "writing feature-sg"; goto abort_transaction; } err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); if (err) { message = "writing feature-gso-tcpv4"; goto abort_transaction; } err = xenbus_transaction_end(xbt, 0); if (err) { if (err == -EAGAIN) goto again; xenbus_dev_fatal(dev, err, "completing transaction"); goto destroy_ring; } return 0; abort_transaction: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, err, "%s", message); destroy_ring: xennet_disconnect_backend(info); out: return err; } static int xennet_connect(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); int i, requeue_idx, err; struct sk_buff *skb; grant_ref_t ref; struct xen_netif_rx_request *req; unsigned int feature_rx_copy; err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy", "%u", &feature_rx_copy); if (err != 1) feature_rx_copy = 0; if (!feature_rx_copy) { dev_info(&dev->dev, "backend does not support copying receive path\n"); return -ENODEV; } err = talk_to_netback(np->xbdev, np); if (err) return err; rtnl_lock(); netdev_update_features(dev); rtnl_unlock(); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); /* Step 1: Discard all pending TX packet fragments. */ xennet_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netif_carrier_on(np->netdev); notify_remote_via_irq(np->netdev->irq); xennet_tx_buf_gc(dev); xennet_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } /** * Callback received when the backend's state changes. 
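* When the backend reaches InitWait the frontend connects its rings via
* xennet_connect() and switches itself to Connected; a backend move to
* Closing is acknowledged with xenbus_frontend_closed().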
*/ static void netback_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct netfront_info *np = dev_get_drvdata(&dev->dev); struct net_device *netdev = np->netdev; dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateConnected: case XenbusStateUnknown: case XenbusStateClosed: break; case XenbusStateInitWait: if (dev->state != XenbusStateInitialising) break; if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); netif_notify_peers(netdev); break; case XenbusStateClosing: xenbus_frontend_closed(dev); break; } } static const struct xennet_stat { char name[ETH_GSTRING_LEN]; u16 offset; } xennet_stats[] = { { "rx_gso_checksum_fixup", offsetof(struct netfront_info, rx_gso_checksum_fixup) }, }; static int xennet_get_sset_count(struct net_device *dev, int string_set) { switch (string_set) { case ETH_SS_STATS: return ARRAY_SIZE(xennet_stats); default: return -EINVAL; } } static void xennet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { void *np = netdev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) data[i] = *(unsigned long *)(np + xennet_stats[i].offset); } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, xennet_stats[i].name, ETH_GSTRING_LEN); break; } } static const struct ethtool_ops xennet_ethtool_ops = { .get_link = ethtool_op_get_link, .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, }; #ifdef CONFIG_SYSFS static ssize_t show_rxbuf_min(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_min_target); } static ssize_t store_rxbuf_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target > np->rx_max_target) np->rx_max_target = target; np->rx_min_target = target; if (target > np->rx_target) np->rx_target = target; xennet_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_max(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_max_target); } static ssize_t store_rxbuf_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *np = netdev_priv(netdev); char *endp; unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; target = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG; if (target < RX_MIN_TARGET) target = RX_MIN_TARGET; if (target > RX_MAX_TARGET) target = RX_MAX_TARGET; spin_lock_bh(&np->rx_lock); if (target < 
np->rx_min_target) np->rx_min_target = target; np->rx_max_target = target; if (target < np->rx_target) np->rx_target = target; xennet_alloc_rx_buffers(netdev); spin_unlock_bh(&np->rx_lock); return len; } static ssize_t show_rxbuf_cur(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct netfront_info *info = netdev_priv(netdev); return sprintf(buf, "%u\n", info->rx_target); } static struct device_attribute xennet_attrs[] = { __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), }; static int xennet_sysfs_addif(struct net_device *netdev) { int i; int err; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { err = device_create_file(&netdev->dev, &xennet_attrs[i]); if (err) goto fail; } return 0; fail: while (--i >= 0) device_remove_file(&netdev->dev, &xennet_attrs[i]); return err; } static void xennet_sysfs_delif(struct net_device *netdev) { int i; for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) device_remove_file(&netdev->dev, &xennet_attrs[i]); } #endif /* CONFIG_SYSFS */ static const struct xenbus_device_id netfront_ids[] = { { "vif" }, { "" } }; static int __devexit xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); xennet_sysfs_delif(info->netdev); unregister_netdev(info->netdev); del_timer_sync(&info->rx_refill_timer); free_percpu(info->stats); free_netdev(info->netdev); return 0; } static DEFINE_XENBUS_DRIVER(netfront, , .probe = netfront_probe, .remove = __devexit_p(xennet_remove), .resume = netfront_resume, .otherend_changed = netback_changed, ); static int __init netif_init(void) { if (!xen_domain()) return -ENODEV; if (xen_initial_domain()) return 0; if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); return xenbus_register_frontend(&netfront_driver); } module_init(netif_init); static void __exit netif_exit(void) { if (xen_initial_domain()) return; xenbus_unregister_driver(&netfront_driver); } module_exit(netif_exit); MODULE_DESCRIPTION("Xen virtual network device frontend"); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:vif"); MODULE_ALIAS("xennet"); UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/other/000077500000000000000000000000001314037446600264135ustar00rootroot00000000000000UVP-Tools-2.2.0.316/uvp-xenpv/uvp-pvops_xen_driver-2.6.32to4.0.x/xen-vnif/other/xen-netfront.c000066400000000000000000001423561314037446600312210ustar00rootroot00000000000000/* * Virtual network driver for conversing with remote driver backends. 
* * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const struct ethtool_ops xennet_ethtool_ops; static int print_once; struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) struct netfront_info { struct list_head list; struct net_device *netdev; struct napi_struct napi; unsigned int evtchn; struct xenbus_device *xbdev; spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; /* Receive-ring batched refills. 
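* rx_target floats between rx_min_target and rx_max_target according to
* demand; rx_batch holds skbs allocated but not yet pushed onto the ring,
* and rx_refill_timer retries the refill when allocation fails.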
*/ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return ((unsigned long)list->skb < PAGE_OFFSET); } /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while (0) #endif static int xennet_can_sg(struct net_device *dev) { return dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); napi_schedule(&np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static void xennet_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev))) netif_wake_queue(dev); } static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. 
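* batch_target below is the fill target minus the requests already
* outstanding on the ring; the loop then tops up rx_batch until that many
* skbs are queued.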
*/ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; break; } skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? */ if (i < (np->rx_target/2)) { if (req_prod > np->rx.sring->req_prod) goto push; return; } /* Adjust our fill target if we risked running out of buffers. */ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && ((np->rx_target *= 2) > np->rx_max_target)) np->rx_target = np->rx_max_target; refill: for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; skb->dev = dev; id = xennet_rxidx(req_prod + i); BUG_ON(np->rx_skbs[id]); np->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&np->gref_rx_head); BUG_ON((signed short)ref < 0); np->grant_rx_ref[id] = ref; pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); vaddr = page_address(skb_shinfo(skb)->frags[0].page); req = RING_GET_REQUEST(&np->rx, req_prod + i); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, pfn_to_mfn(pfn), 0); req->id = id; req->gref = ref; } wmb(); /* barrier so backend seens requests */ /* Above is a suitable barrier to ensure backend will see requests. */ np->rx.req_prod_pvt = req_prod + i; push: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); } static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) napi_schedule(&np->napi); } spin_unlock_bh(&np->rx_lock); netif_start_queue(dev); return 0; } static void xennet_tx_buf_gc(struct net_device *dev) { RING_IDX cons, prod; unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; BUG_ON(!netif_carrier_ok(dev)); do { prod = np->tx.sring->rsp_prod; rmb(); /* Ensure we see responses up to 'rp'. */ for (cons = np->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); if (txrsp->status == NETIF_RSP_NULL) continue; id = txrsp->id; skb = np->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( np->grant_tx_ref[id]) != 0)) { printk(KERN_ALERT "xennet_tx_buf_gc: warning " "-- grant still in use by backend " "domain.\n"); BUG(); } gnttab_end_foreign_access_ref( np->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; np->grant_tx_page[id] = NULL; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); dev_kfree_skb_irq(skb); } np->tx.rsp_cons = prod; /* * Set a new event, then check for race with update of tx_cons. * Note that it is essential to schedule a callback, no matter * how few buffers are pending. 
Even if there is space in the * transmit ring, higher layers may be blocked because too much * data is outstanding: in such cases notification from Xen is * likely to be the only kick that we'll get. */ np->tx.sring->rsp_event = prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; mb(); /* update shared area */ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); xennet_maybe_wake_tx(dev); } static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, struct xen_netif_tx_request *tx) { struct netfront_info *np = netdev_priv(dev); char *data = skb->data; unsigned long mfn; RING_IDX prod = np->tx.req_prod_pvt; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned int id; grant_ref_t ref; int i; /* While the header overlaps a page boundary (including being larger than a page), split it it into page-sized chunks. */ while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; tx->flags |= NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; tx->flags = 0; } /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; tx->flags |= NETTXF_more_data; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); tx = RING_GET_REQUEST(&np->tx, prod++); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = pfn_to_mfn(page_to_pfn(frag->page)); gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); np->grant_tx_page[id] = frag->page; tx->gref = np->grant_tx_ref[id] = ref; tx->offset = frag->page_offset; tx->size = frag->size; tx->flags = 0; } np->tx.req_prod_pvt = prod; } static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct xen_netif_tx_request *tx; struct xen_netif_extra_info *extra; char *data = skb->data; RING_IDX i; grant_ref_t ref; unsigned long mfn; int notify; int frags = skb_shinfo(skb)->nr_frags; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); if (unlikely(frags > MAX_SKB_FRAGS + 1)) { printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", frags); dump_stack(); goto drop; } spin_lock_irq(&np->tx_lock); if (unlikely(!netif_carrier_ok(dev) || (frags > 1 && !xennet_can_sg(dev)) || netif_needs_gso(dev, skb))) { spin_unlock_irq(&np->tx_lock); goto drop; } i = np->tx.req_prod_pvt; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb; tx = RING_GET_REQUEST(&np->tx, i); tx->id = id; ref = gnttab_claim_grant_reference(&np->gref_tx_head); BUG_ON((signed short)ref < 0); mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; extra = NULL; tx->flags = 0; if 
(skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ tx->flags |= NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&np->tx, ++i); if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else tx->flags |= NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; extra = gso; } np->tx.req_prod_pvt = i + 1; xennet_make_frags(skb, dev, tx); tx->size = skb->len; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); if (notify) notify_remote_via_irq(np->netdev->irq); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(dev); if (!netfront_tx_slot_available(np)) netif_stop_queue(dev); spin_unlock_irq(&np->tx_lock); return NETDEV_TX_OK; drop: dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); napi_disable(&np->napi); return 0; } static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { int new = xennet_rxidx(np->rx.req_prod_pvt); BUG_ON(np->rx_skbs[new]); np->rx_skbs[new] = skb; np->grant_rx_ref[new] = ref; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; np->rx.req_prod_pvt++; } static int xennet_get_extras(struct netfront_info *np, struct xen_netif_extra_info *extras, RING_IDX rp) { struct xen_netif_extra_info *extra; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; int err = 0; do { struct sk_buff *skb; grant_ref_t ref; if (unlikely(cons + 1 == rp)) { if (net_ratelimit()) dev_warn(dev, "Missing extra info\n"); err = -EBADR; break; } extra = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&np->rx, ++cons); if (unlikely(!extra->type || extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", extra->type); err = -EINVAL; } else { memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); } skb = xennet_get_rx_skb(np, cons); ref = xennet_get_rx_ref(np, cons); xennet_move_rx_slot(np, skb, ref); } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); np->rx.rsp_cons = cons; return err; } static int xennet_get_responses(struct netfront_info *np, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &np->netdev->dev; RING_IDX cons = np->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(np, cons); grant_ref_t ref = xennet_get_rx_ref(np, cons); int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); int frags = 1; int err = 0; unsigned long ret; if (rx->flags & NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } for (;;) { if (unlikely(rx->status < 0 || rx->offset + rx->status > PAGE_SIZE)) { if (net_ratelimit()) dev_warn(dev, "rx->offset: %x, size: %u\n", rx->offset, rx->status); xennet_move_rx_slot(np, skb, ref); err = -EINVAL; goto next; } /* * This definitely indicates a bug, either in this driver or in * the backend driver. 
In future this should flag the bad * situation to the system controller to reboot the backed. */ if (ref == GRANT_INVALID_REF) { if (net_ratelimit()) dev_warn(dev, "Bad rx response id %d.\n", rx->id); err = -EINVAL; goto next; } ret = gnttab_end_foreign_access_ref(ref, 0); BUG_ON(!ret); gnttab_release_grant_reference(&np->gref_rx_head, ref); __skb_queue_tail(list, skb); next: if (!(rx->flags & NETRXF_more_data)) break; if (cons + frags == rp) { if (net_ratelimit()) dev_warn(dev, "Need more frags\n"); err = -ENOENT; break; } rx = RING_GET_RESPONSE(&np->rx, cons + frags); skb = xennet_get_rx_skb(np, cons + frags); ref = xennet_get_rx_ref(np, cons + frags); frags++; } if (unlikely(frags > max)) { if (net_ratelimit()) dev_warn(dev, "Too many frags\n"); err = -E2BIG; } if (unlikely(err)) np->rx.rsp_cons = cons + frags; return err; } static int xennet_set_skb_gso(struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { if (net_ratelimit()) printk(KERN_WARNING "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { if (net_ratelimit()) printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static RING_IDX xennet_fill_frags(struct netfront_info *np, struct sk_buff *skb, struct sk_buff_head *list) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; RING_IDX cons = np->rx.rsp_cons; skb_frag_t *frag = shinfo->frags + nr_frags; struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { struct xen_netif_rx_response *rx = RING_GET_RESPONSE(&np->rx, ++cons); frag->page = skb_shinfo(nskb)->frags[0].page; frag->page_offset = rx->offset; frag->size = rx->status; skb->data_len += rx->status; skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); frag++; nr_frags++; } shinfo->nr_frags = nr_frags; return cons; } static int skb_checksum_setup(struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); break; default: if (net_ratelimit()) printk(KERN_ERR "Attempting to checksum a non-" "TCP/UDP packet, dropping a protocol" " %d packet", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static int handle_incoming_queue(struct net_device *dev, struct sk_buff_head *rxq) { int packets_dropped = 0; struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { struct page *page = NETFRONT_SKB_CB(skb)->page; void *vaddr = page_address(page); unsigned offset = NETFRONT_SKB_CB(skb)->offset; memcpy(skb->data, vaddr + offset, skb_headlen(skb)); if (page != skb_shinfo(skb)->frags[0].page) __free_page(page); /* Ethernet work: Delayed to here as it peeks the header. 
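* eth_type_trans() reads the Ethernet header to set skb->protocol and
* strips it from the data, so it can only run once the header bytes have
* been copied into the linear area above.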
*/ skb->protocol = eth_type_trans(skb, dev); if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_setup(skb)) { kfree_skb(skb); packets_dropped++; dev->stats.rx_errors++; continue; } } dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* Pass it up. */ netif_receive_skb(skb); } return packets_dropped; } static int xennet_poll(struct napi_struct *napi, int budget) { struct netfront_info *np = container_of(napi, struct netfront_info, napi); struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; unsigned int len; int err; spin_lock(&np->rx_lock); skb_queue_head_init(&rxq); skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ i = np->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(np, &rinfo, rp, &tmpq); if (unlikely(err)) { err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } skb = __skb_dequeue(&tmpq); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); np->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; NETFRONT_SKB_CB(skb)->offset = rx->offset; len = rx->status; if (len > RX_COPY_THRESHOLD) len = RX_COPY_THRESHOLD; skb_put(skb, len); if (rx->status > len) { skb_shinfo(skb)->frags[0].page_offset = rx->offset + len; skb_shinfo(skb)->frags[0].size = rx->status - len; skb->data_len = rx->status - len; } else { skb_shinfo(skb)->frags[0].page = NULL; skb_shinfo(skb)->nr_frags = 0; } i = xennet_fill_frags(np, skb, &tmpq); /* * Truesize approximates the size of true data plus * any supervisor overheads. Adding hypervisor * overheads has been shown to significantly reduce * achievable bandwidth with the default receive * buffer size. It is therefore not wise to account * for it here. * * After alloc_skb(RX_COPY_THRESHOLD), truesize is set * to RX_COPY_THRESHOLD + the supervisor * overheads. Here, we add the size of the data pulled * in xennet_fill_frags(). * * We also adjust for any unused space in the main * data area by subtracting (RX_COPY_THRESHOLD - * len). This is especially important with drivers * which split incoming packets into header and data, * using only 66 bytes of the main data area (see the * e1000 driver for example.) On such systems, * without this last adjustement, our achievable * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; if (rx->flags & NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (rx->flags & NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); np->rx.rsp_cons = ++i; work_done++; } __skb_queue_purge(&errq); work_done -= handle_incoming_queue(dev, &rxq); /* If we get a callback with very few responses, reduce fill target. */ /* NB. Note exponential increase, linear decrease. 
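* i.e. xennet_alloc_rx_buffers() doubles rx_target when the ring risks
* running dry, while here it is only stepped down by one (never below
* rx_min_target) when a poll finds most requests still unconsumed.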
*/ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) np->rx_target = np->rx_min_target; xennet_alloc_rx_buffers(dev); if (work_done < budget) { int more_to_do = 0; local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) __napi_complete(napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) { int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; if (mtu > max) return -EINVAL; dev->mtu = mtu; return 0; } static void xennet_release_tx_bufs(struct netfront_info *np) { struct sk_buff *skb; int i; for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ if (skb_entry_is_link(&np->tx_skbs[i])) continue; skb = np->tx_skbs[i].skb; get_page(np->grant_tx_page[i]); gnttab_end_foreign_access(np->grant_tx_ref[i], GNTMAP_readonly, (unsigned long)page_address(np->grant_tx_page[i])); np->grant_tx_page[i] = NULL; np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); dev_kfree_skb_irq(skb); } } static void xennet_release_rx_bufs(struct netfront_info *np) { int id, ref; spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { struct sk_buff *skb; struct page *page; skb = np->rx_skbs[id]; if (!skb) continue; ref = np->grant_rx_ref[id]; if (ref == GRANT_INVALID_REF) continue; page = skb_shinfo(skb)->frags[0].page; /* gnttab_end_foreign_access() needs a page ref until * foreign access is ended (which may be deferred). */ get_page(page); gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page)); np->grant_rx_ref[id] = GRANT_INVALID_REF; kfree_skb(skb); } spin_unlock_bh(&np->rx_lock); } static void xennet_uninit(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); xennet_release_tx_bufs(np); xennet_release_rx_bufs(np); gnttab_free_grant_references(np->gref_tx_head); gnttab_free_grant_references(np->gref_rx_head); } static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_uninit = xennet_uninit, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, .ndo_change_mtu = xennet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) { int i, err; struct net_device *netdev; struct netfront_info *np; netdev = alloc_etherdev(sizeof(struct netfront_info)); if (!netdev) { printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __func__); return ERR_PTR(-ENOMEM); } np = netdev_priv(netdev); np->xbdev = dev; spin_lock_init(&np->tx_lock); spin_lock_init(&np->rx_lock); skb_queue_head_init(&np->rx_batch); np->rx_target = RX_DFL_MIN_TARGET; np->rx_min_target = RX_DFL_MIN_TARGET; np->rx_max_target = RX_MAX_TARGET; init_timer(&np->rx_refill_timer); np->rx_refill_timer.data = (unsigned long)netdev; np->rx_refill_timer.function = rx_refill_timeout; /* Initialise tx_skbs as a free chain containing every entry. 
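* Entry i links to entry i+1, so get_id_from_freelist() and
* add_id_to_freelist() hand out and recycle tx ring ids in O(1).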
*/ np->tx_skb_freelist = 0; for (i = 0; i < NET_TX_RING_SIZE; i++) { skb_entry_set_link(&np->tx_skbs[i], i+1); np->grant_tx_ref[i] = GRANT_INVALID_REF; } /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; np->grant_tx_page[i] = NULL; } /* A grant for every tx ring slot */ if (gnttab_alloc_grant_references(TX_MAX_TARGET, &np->gref_tx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); err = -ENOMEM; goto exit; } /* A grant for every rx ring slot */ if (gnttab_alloc_grant_references(RX_MAX_TARGET, &np->gref_rx_head) < 0) { printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); err = -ENOMEM; goto exit_free_tx; } netdev->netdev_ops = &xennet_netdev_ops; netif_napi_add(netdev, &np->napi, xennet_poll, 64); /* Assume all features and let xennet_set_features fix up. */ netdev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; netif_carrier_off(netdev); return netdev; exit_free_tx: gnttab_free_grant_references(np->gref_tx_head); exit: free_netdev(netdev); return ERR_PTR(err); } static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) { char *s, *e, *macstr; int i; macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); if (IS_ERR(macstr)) return PTR_ERR(macstr); for (i = 0; i < ETH_ALEN; i++) { mac[i] = simple_strtoul(s, &e, 16); if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { kfree(macstr); return -ENOENT; } s = e+1; } kfree(macstr); return 0; } /** * Entry point to this code when a new device is created. Allocate the basic * structures and the ring buffers for communication with the backend, and * inform the backend of the appropriate details for those. */ static int __devinit netfront_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int err; struct net_device *netdev; struct net_device *netdev_found=NULL; struct netfront_info *info; char mac_addr[ETH_ALEN]; /* * Before anything normal is done, check for the abnormal setup of xen tools * starting up a guest with an 8139 _and_ a xen-vnif with the same mac addr; * this makes network tools, like udev &/or NetworkManager have a nutty. * So, if this situation exists, don't register the xen-vnif, and print out * a message to change the xen guest configuration to add type=netfront * to the vif spec if xen-vnif is desired network configuration. */ err = xen_net_read_mac(dev, mac_addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out; } rtnl_lock(); /* dev_getbyhwaddr() throws assert w/o this lock */ netdev_found = dev_getbyhwaddr(&init_net, ARPHRD_ETHER, mac_addr); rtnl_unlock(); if (netdev_found) { struct pci_dev *pdev_8139_in_netdev; struct pci_dev *pdev_8139=NULL; /* Now check if 8139 connected to PCI; * if so, check that it's the same one associated with net_dev * if so, 3 strikes, you're out... 
don't configure xen-vnif */ pdev_8139 = pci_get_subsys(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, NULL); if (pdev_8139) { pdev_8139_in_netdev = to_pci_dev(netdev_found->dev.parent); if ((pdev_8139_in_netdev == pdev_8139) && !print_once) { printk("Xen: found realtek-8139 w/same mac-addr as xen-vnif; "); printk(" skipping xen-vnif configuration \n"); printk(" Add 'type=netfront' to xen guest's vif config line so"); printk(" xen-vnif is only, primary network device\n"); print_once++; return -ENODEV; } } } netdev = xennet_create_dev(dev); if (IS_ERR(netdev)) { err = PTR_ERR(netdev); xenbus_dev_fatal(dev, err, "creating netdev"); return err; } info = netdev_priv(netdev); dev_set_drvdata(&dev->dev, info); err = xen_net_read_mac(dev, info->netdev->dev_addr); if (err) { printk(KERN_ERR "%s: xen-netfront read nic hwaddr error.\n", __FUNCTION__); } err = register_netdev(info->netdev); if (err) { printk(KERN_WARNING "%s: register_netdev err=%d\n", __func__, err); goto fail; } err = xennet_sysfs_addif(info->netdev); if (err) { unregister_netdev(info->netdev); printk(KERN_WARNING "%s: add sysfs failed err=%d\n", __func__, err); goto fail; } return 0; fail: free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); out: return err; } static void xennet_end_access(int ref, void *page) { /* This frees the page as a side-effect */ if (ref != GRANT_INVALID_REF) gnttab_end_foreign_access(ref, 0, (unsigned long)page); } static void xennet_disconnect_backend(struct netfront_info *info) { /* Stop old i/f to prevent errors whilst we rebuild the state. */ spin_lock_bh(&info->rx_lock); spin_lock_irq(&info->tx_lock); netif_carrier_off(info->netdev); spin_unlock_irq(&info->tx_lock); spin_unlock_bh(&info->rx_lock); if (info->netdev->irq) unbind_from_irqhandler(info->netdev->irq, info->netdev); info->evtchn = info->netdev->irq = 0; /* End access and free the pages */ xennet_end_access(info->tx_ring_ref, info->tx.sring); xennet_end_access(info->rx_ring_ref, info->rx.sring); info->tx_ring_ref = GRANT_INVALID_REF; info->rx_ring_ref = GRANT_INVALID_REF; info->tx.sring = NULL; info->rx.sring = NULL; } /** * We are reconnecting to the backend, due to a suspend/resume, or a backend * driver restart. We tear down our netif structure and recreate it, but * leave the device-layer structures intact so that this is transparent to the * rest of the kernel. */ static int netfront_resume(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); if (gnttab_query_foreign_access(info->tx_ring_ref) || \ gnttab_query_foreign_access(info->rx_ring_ref)) return 0; dev_dbg(&dev->dev, "%s\n", dev->nodename); xennet_disconnect_backend(info); return 0; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct netfront_info *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); if (likely(netif_carrier_ok(dev))) { xennet_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. 
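* The interrupt itself only garbage-collects completed tx slots; rx work
* is deferred to NAPI by scheduling the poll routine whenever unconsumed
* responses are pending.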
static void xennet_end_access(int ref, void *page)
{
    /* This frees the page as a side-effect */
    if (ref != GRANT_INVALID_REF)
        gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
    /* Stop old i/f to prevent errors whilst we rebuild the state. */
    spin_lock_bh(&info->rx_lock);
    spin_lock_irq(&info->tx_lock);
    netif_carrier_off(info->netdev);
    spin_unlock_irq(&info->tx_lock);
    spin_unlock_bh(&info->rx_lock);

    if (info->netdev->irq)
        unbind_from_irqhandler(info->netdev->irq, info->netdev);
    info->evtchn = info->netdev->irq = 0;

    /* End access and free the pages */
    xennet_end_access(info->tx_ring_ref, info->tx.sring);
    xennet_end_access(info->rx_ring_ref, info->rx.sring);

    info->tx_ring_ref = GRANT_INVALID_REF;
    info->rx_ring_ref = GRANT_INVALID_REF;
    info->tx.sring = NULL;
    info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
    struct netfront_info *info = dev_get_drvdata(&dev->dev);

    if (gnttab_query_foreign_access(info->tx_ring_ref) ||
        gnttab_query_foreign_access(info->rx_ring_ref))
        return 0;

    dev_dbg(&dev->dev, "%s\n", dev->nodename);

    xennet_disconnect_backend(info);
    return 0;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct netfront_info *np = netdev_priv(dev);
    unsigned long flags;

    spin_lock_irqsave(&np->tx_lock, flags);

    if (likely(netif_carrier_ok(dev))) {
        xennet_tx_buf_gc(dev);
        /* Under tx_lock: protects access to rx shared-ring indexes. */
        if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
            napi_schedule(&np->napi);
    }

    spin_unlock_irqrestore(&np->tx_lock, flags);

    return IRQ_HANDLED;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
    struct xen_netif_tx_sring *txs;
    struct xen_netif_rx_sring *rxs;
    int err;
    struct net_device *netdev = info->netdev;

    info->tx_ring_ref = GRANT_INVALID_REF;
    info->rx_ring_ref = GRANT_INVALID_REF;
    info->rx.sring = NULL;
    info->tx.sring = NULL;
    netdev->irq = 0;

    /* Read mac only in the first setup. */
    if (!is_valid_ether_addr(netdev->dev_addr)) {
        err = xen_net_read_mac(dev, netdev->dev_addr);
        if (err) {
            xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
            goto fail;
        }
    }

    txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
    if (!txs) {
        err = -ENOMEM;
        xenbus_dev_fatal(dev, err, "allocating tx ring page");
        goto fail;
    }
    SHARED_RING_INIT(txs);
    FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

    err = xenbus_grant_ring(dev, virt_to_mfn(txs));
    if (err < 0) {
        free_page((unsigned long)txs);
        goto fail;
    }
    info->tx_ring_ref = err;

    rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
    if (!rxs) {
        err = -ENOMEM;
        xenbus_dev_fatal(dev, err, "allocating rx ring page");
        goto fail;
    }
    SHARED_RING_INIT(rxs);
    FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

    err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
    if (err < 0) {
        free_page((unsigned long)rxs);
        goto fail;
    }
    info->rx_ring_ref = err;

    err = xenbus_alloc_evtchn(dev, &info->evtchn);
    if (err)
        goto fail;

    err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
                                    IRQF_SAMPLE_RANDOM, netdev->name, netdev);
    if (err < 0)
        goto fail;
    netdev->irq = err;

    return 0;

fail:
    return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev, struct netfront_info *info)
{
    const char *message;
    struct xenbus_transaction xbt;
    int err;
    /* Create shared ring, alloc event channel. */
    err = setup_netfront(dev, info);
    if (err)
        goto out;

again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        xenbus_dev_fatal(dev, err, "starting transaction");
        goto destroy_ring;
    }

    err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
                        info->tx_ring_ref);
    if (err) {
        message = "writing tx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
                        info->rx_ring_ref);
    if (err) {
        message = "writing rx ring-ref";
        goto abort_transaction;
    }
    err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                        info->evtchn);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
    if (err) {
        message = "writing request-rx-copy";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
    if (err) {
        message = "writing feature-rx-notify";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
    if (err) {
        message = "writing feature-sg";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
    if (err) {
        message = "writing feature-gso-tcpv4";
        goto abort_transaction;
    }

    err = xenbus_transaction_end(xbt, 0);
    if (err) {
        if (err == -EAGAIN)
            goto again;
        xenbus_dev_fatal(dev, err, "completing transaction");
        goto destroy_ring;
    }

    return 0;

abort_transaction:
    xenbus_transaction_end(xbt, 1);
    xenbus_dev_fatal(dev, err, "%s", message);
destroy_ring:
    xennet_disconnect_backend(info);
out:
    return err;
}
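/*
 * For illustration only: after a successful talk_to_backend() the frontend
 * area under dev->nodename (typically something like "device/vif/0"; the
 * exact path is an assumption, not read from this code) holds roughly:
 *
 *   tx-ring-ref       = <grant reference of the tx shared ring>
 *   rx-ring-ref       = <grant reference of the rx shared ring>
 *   event-channel     = <event channel bound to xennet_interrupt>
 *   request-rx-copy   = 1
 *   feature-rx-notify = 1
 *   feature-sg        = 1
 *   feature-gso-tcpv4 = 1
 *
 * The backend reads these nodes to map the rings and complete the connection.
 */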
static int xennet_set_sg(struct net_device *dev, u32 data)
{
    int val, rc;

    if (data) {
        struct netfront_info *np = netdev_priv(dev);

        if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
                         "%d", &val) < 0)
            val = 0;
    } else
        val = 0;

    rc = ethtool_op_set_sg(dev, val);
    if (rc == 0 && !val) {
        if (dev->mtu > ETH_DATA_LEN)
            dev->mtu = ETH_DATA_LEN;
        if (data)
            rc = -ENOSYS;
    }

    return rc;
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
    int val, rc;

    if (data) {
        struct netfront_info *np = netdev_priv(dev);

        if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-gso-tcpv4",
                         "%d", &val) < 0)
            val = 0;
    } else
        val = 0;

    rc = ethtool_op_set_tso(dev, val);
    if (rc == 0 && !val && data)
        rc = -ENOSYS;

    return rc;
}

static void xennet_set_features(struct net_device *dev)
{
    /* Set ROBUST, turn off all other GSO bits except TSO. */
    dev->features = (dev->features & NETIF_F_TSO) |
                    (dev->features & ~NETIF_F_GSO_MASK) |
                    NETIF_F_GSO_ROBUST;

    /*
     * We need checksum offload to enable scatter/gather, and
     * scatter/gather to enable TSO.  Calling xennet_set_sg and
     * xennet_set_tso ensures that Xenstore is probed for feature
     * support in the backend.
     */
    xennet_set_sg(dev, ((dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG)) ==
                        (NETIF_F_IP_CSUM | NETIF_F_SG)));
    xennet_set_tso(dev, ((dev->features & (NETIF_F_SG | NETIF_F_TSO)) ==
                         (NETIF_F_SG | NETIF_F_TSO)));
}

#ifdef CONFIG_INET
/*
 * NOTE: the header names on the next three #include lines are inferred from
 * the code below (in_device/ifa walking, arp_create(), RT_SCOPE_LINK); they
 * may not match the original tree exactly.
 */
#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/route.h>

/* Get an IP address on this net_device for sending ARP. */
static __be32 xennet_inet_select_addr(const struct net_device *dev, __be32 dst,
                                      int scope)
{
    __be32 addr = 0;
    struct in_device *in_dev;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(dev);
    if (!in_dev)
        goto out_unlock;

    for_primary_ifa(in_dev) {
        if (ifa->ifa_scope > scope)
            continue;
        if (!dst || inet_ifa_match(dst, ifa)) {
            addr = ifa->ifa_local;
            break;
        }
        if (!addr)
            addr = ifa->ifa_local;
    } endfor_ifa(in_dev);

out_unlock:
    rcu_read_unlock();
    return addr;
}

static int xennet_gratuitous_arp(struct net_device *dev, int type, int count)
{
    struct sk_buff *skb = NULL;
    __be32 src_ip, dst_ip;

    dst_ip = INADDR_BROADCAST;
    src_ip = xennet_inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);

    /* Nothing to do. */
    if (src_ip == 0) {
        printk(KERN_INFO "xen-netfront: %s has no ip, don't send garp.\n",
               dev->name);
        return -ENODATA;
    }

    switch (type) {
    case ARPOP_REQUEST:
        do {
            skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, src_ip, dev, src_ip,
                             /*dst_hw*/ NULL, /*src_hw*/ NULL,
                             /*target_hw*/ NULL);
            if (skb == NULL) {
                printk(KERN_INFO "xen-netfront: arp has no skb.\n");
                return -ENODATA;
            }
            xennet_start_xmit(skb, dev);
        } while (count-- > 0);
        break;
    case ARPOP_REPLY:
        do {
            skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dst_ip, dev, src_ip,
                             /*dst_hw*/ NULL, /*src_hw*/ NULL,
                             /*target_hw*/ dev->dev_addr);
            if (skb == NULL) {
                printk(KERN_INFO "xen-netfront: arp has no skb.\n");
                return -ENODATA;
            }
            xennet_start_xmit(skb, dev);
        } while (count-- > 0);
        break;
    default:
        break;
    }

    return 0;
}

static int inetdev_notify(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
    struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
    struct net_device *dev = ifa->ifa_dev->dev;

    /* UP event and is it one of our devices? */
    if (event == NETDEV_UP && dev->netdev_ops->ndo_open == xennet_open) {
        (void)xennet_gratuitous_arp(dev, ARPOP_REQUEST, 3);
        (void)xennet_gratuitous_arp(dev, ARPOP_REPLY, 3);
    }

    return NOTIFY_DONE;
}

static struct notifier_block notifier_inetdev = {
    .notifier_call = inetdev_notify,
    .next          = NULL,
    .priority      = 0
};
#else
/* No IPv4 support: keep the same signature as the real helper and do nothing. */
static int xennet_gratuitous_arp(struct net_device *dev, int type, int count)
{
    /* TODO: Fix me? */
    return 0;
}
#endif

static int xennet_connect(struct net_device *dev)
{
    struct netfront_info *np = netdev_priv(dev);
    int i, requeue_idx, err;
    struct sk_buff *skb;
    grant_ref_t ref;
    struct xen_netif_rx_request *req;
    unsigned int feature_rx_copy;

    err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-rx-copy",
                       "%u", &feature_rx_copy);
    if (err != 1)
        feature_rx_copy = 0;

    if (!feature_rx_copy) {
        dev_info(&dev->dev, "backend does not support copying receive path\n");
        return -ENODEV;
    }

    err = talk_to_backend(np->xbdev, np);
    if (err)
        return err;

    xennet_set_features(dev);

    spin_lock_bh(&np->rx_lock);
    spin_lock_irq(&np->tx_lock);

    /* Step 1: Discard all pending TX packet fragments. */
    xennet_release_tx_bufs(np);

    /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
        if (!np->rx_skbs[i])
            continue;

        skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
        ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
        req = RING_GET_REQUEST(&np->rx, requeue_idx);

        gnttab_grant_foreign_access_ref(
            ref, np->xbdev->otherend_id,
            pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
            0);
        req->gref = ref;
        req->id   = requeue_idx;

        requeue_idx++;
    }

    np->rx.req_prod_pvt = requeue_idx;

    /*
     * Step 3: All public and private state should now be sane.  Get
     * ready to start sending and receiving packets and give the driver
     * domain a kick because we've probably just requeued some
     * packets.
     */
    netif_carrier_on(np->netdev);
    notify_remote_via_irq(np->netdev->irq);
    xennet_tx_buf_gc(dev);
    xennet_alloc_rx_buffers(dev);

    spin_unlock_irq(&np->tx_lock);
    spin_unlock_bh(&np->rx_lock);

    return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
    struct netfront_info *np = dev_get_drvdata(&dev->dev);
    struct net_device *netdev = np->netdev;

    dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

    switch (backend_state) {
    case XenbusStateInitialising:
    case XenbusStateInitialised:
    case XenbusStateUnknown:
    case XenbusStateClosed:
        break;

    case XenbusStateInitWait:
        if (dev->state != XenbusStateInitialising)
            break;
        if (xennet_connect(netdev) != 0)
            break;
        xenbus_switch_state(dev, XenbusStateConnected);
        break;

    case XenbusStateConnected:
        xennet_gratuitous_arp(netdev, ARPOP_REQUEST, 3);
        xennet_gratuitous_arp(netdev, ARPOP_REPLY, 3);
        netif_notify_peers(netdev);
        break;

    case XenbusStateClosing:
        xenbus_frontend_closed(dev);
        break;
    }
}

static const struct ethtool_ops xennet_ethtool_ops = {
    .set_tx_csum = ethtool_op_set_tx_csum,
    .set_sg      = xennet_set_sg,
    .set_tso     = xennet_set_tso,
    .get_link    = ethtool_op_get_link,
};
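/*
 * The sysfs attributes below expose the rx buffer fill targets.  Assuming the
 * usual layout where the net_device's class device appears under
 * /sys/class/net/<ifname>/ (an assumption about the running kernel, not
 * something this file controls), an administrator can read rxbuf_cur and
 * write rxbuf_min / rxbuf_max to retune receive buffering; values are
 * clamped to [RX_MIN_TARGET, RX_MAX_TARGET] and writing requires
 * CAP_NET_ADMIN.
 */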
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
    struct net_device *netdev = to_net_dev(dev);
    struct netfront_info *info = netdev_priv(netdev);

    return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
    struct net_device *netdev = to_net_dev(dev);
    struct netfront_info *np = netdev_priv(netdev);
    char *endp;
    unsigned long target;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    target = simple_strtoul(buf, &endp, 0);
    if (endp == buf)
        return -EBADMSG;

    if (target < RX_MIN_TARGET)
        target = RX_MIN_TARGET;
    if (target > RX_MAX_TARGET)
        target = RX_MAX_TARGET;

    spin_lock_bh(&np->rx_lock);
    if (target > np->rx_max_target)
        np->rx_max_target = target;
    np->rx_min_target = target;
    if (target > np->rx_target)
        np->rx_target = target;

    xennet_alloc_rx_buffers(netdev);

    spin_unlock_bh(&np->rx_lock);
    return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
    struct net_device *netdev = to_net_dev(dev);
    struct netfront_info *info = netdev_priv(netdev);

    return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
    struct net_device *netdev = to_net_dev(dev);
    struct netfront_info *np = netdev_priv(netdev);
    char *endp;
    unsigned long target;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    target = simple_strtoul(buf, &endp, 0);
    if (endp == buf)
        return -EBADMSG;

    if (target < RX_MIN_TARGET)
        target = RX_MIN_TARGET;
    if (target > RX_MAX_TARGET)
        target = RX_MAX_TARGET;

    spin_lock_bh(&np->rx_lock);
    if (target < np->rx_min_target)
        np->rx_min_target = target;
    np->rx_max_target = target;
    if (target < np->rx_target)
        np->rx_target = target;

    xennet_alloc_rx_buffers(netdev);

    spin_unlock_bh(&np->rx_lock);
    return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
    struct net_device *netdev = to_net_dev(dev);
    struct netfront_info *info = netdev_priv(netdev);

    return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
    __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
    __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
    __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
    int i;
    int err;

    for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
        err = device_create_file(&netdev->dev, &xennet_attrs[i]);
        if (err)
            goto fail;
    }
    return 0;

fail:
    while (--i >= 0)
        device_remove_file(&netdev->dev, &xennet_attrs[i]);
    return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
        device_remove_file(&netdev->dev, &xennet_attrs[i]);
}
#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
    { "vif" },
    { "" }
};

static int __devexit xennet_remove(struct xenbus_device *dev)
{
    struct netfront_info *info = dev_get_drvdata(&dev->dev);

    dev_dbg(&dev->dev, "%s\n", dev->nodename);

    xennet_disconnect_backend(info);

    unregister_netdev(info->netdev);

    del_timer_sync(&info->rx_refill_timer);

    xennet_sysfs_delif(info->netdev);

    free_netdev(info->netdev);

    return 0;
}

static struct xenbus_driver netfront_driver = {
    .name = "vif",
    .owner = THIS_MODULE,
    .ids = netfront_ids,
    .probe = netfront_probe,
    .remove = __devexit_p(xennet_remove),
    .resume = netfront_resume,
    .otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
    if (!xen_domain())
        return -ENODEV;

    if (xen_initial_domain())
        return 0;

    if (xen_hvm_domain() && !xen_platform_pci_unplug)
        return -ENODEV;

    printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

#ifdef CONFIG_INET
    /* notifier_inetdev is only defined when CONFIG_INET is set (see above). */
    register_inetaddr_notifier(&notifier_inetdev);
#endif

    return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
    if (xen_initial_domain())
        return;

#ifdef CONFIG_INET
    /* Drop the notifier registered in netif_init(). */
    unregister_inetaddr_notifier(&notifier_inetdev);
#endif

    xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");
UVP-Tools-2.2.0.316/version.ini000066400000000000000000000000321314037446600157640ustar00rootroot00000000000000UVPToolsVersion=2.2.0.112